arch/arm/crypto/aes-ce-glue.c


DEFINITIONS

This source file includes the following definitions.
  1. num_rounds
  2. ce_aes_expandkey
  3. ce_aes_setkey
  4. xts_set_key
  5. ecb_encrypt
  6. ecb_decrypt
  7. cbc_encrypt_walk
  8. cbc_encrypt
  9. cbc_decrypt_walk
  10. cbc_decrypt
  11. cts_cbc_encrypt
  12. cts_cbc_decrypt
  13. ctr_encrypt
  14. ctr_encrypt_one
  15. ctr_encrypt_sync
  16. xts_encrypt
  17. xts_decrypt
  18. aes_exit
  19. aes_init

// SPDX-License-Identifier: GPL-2.0-only
/*
 * aes-ce-glue.c - wrapper code for ARMv8 AES
 *
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/module.h>
#include <crypto/xts.h>

MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

/* defined in aes-ce-core.S */
asmlinkage u32 ce_aes_sub(u32 input);
asmlinkage void ce_aes_invert(void *dst, void *src);

asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
                                   int rounds, int blocks);
asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
                                   int rounds, int blocks);

asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
                                   int rounds, int blocks, u8 iv[]);
asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
                                   int rounds, int blocks, u8 iv[]);
asmlinkage void ce_aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
                                   int rounds, int bytes, u8 const iv[]);
asmlinkage void ce_aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
                                   int rounds, int bytes, u8 const iv[]);

asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
                                   int rounds, int blocks, u8 ctr[]);

asmlinkage void ce_aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
                                   int rounds, int bytes, u8 iv[],
                                   u32 const rk2[], int first);
asmlinkage void ce_aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
                                   int rounds, int bytes, u8 iv[],
                                   u32 const rk2[], int first);

struct aes_block {
        u8 b[AES_BLOCK_SIZE];
};

static int num_rounds(struct crypto_aes_ctx *ctx)
{
        /*
         * # of rounds specified by AES:
         * 128 bit key          10 rounds
         * 192 bit key          12 rounds
         * 256 bit key          14 rounds
         * => n byte key        => 6 + (n/4) rounds
         */
        return 6 + ctx->key_length / 4;
}

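/*
 * Illustrative sketch, not part of this kernel file: a compile-time check
 * (stand-alone C11) that the 6 + n/4 rule used by num_rounds() above
 * reproduces the round counts mandated by FIPS-197.
 */
_Static_assert(6 + 16 / 4 == 10, "AES-128 uses 10 rounds");
_Static_assert(6 + 24 / 4 == 12, "AES-192 uses 12 rounds");
_Static_assert(6 + 32 / 4 == 14, "AES-256 uses 14 rounds");
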
static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
                            unsigned int key_len)
{
        /*
         * The AES key schedule round constants
         */
        static u8 const rcon[] = {
                0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
        };

        u32 kwords = key_len / sizeof(u32);
        struct aes_block *key_enc, *key_dec;
        int i, j;

        if (key_len != AES_KEYSIZE_128 &&
            key_len != AES_KEYSIZE_192 &&
            key_len != AES_KEYSIZE_256)
                return -EINVAL;

        ctx->key_length = key_len;
        for (i = 0; i < kwords; i++)
                ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));

        kernel_neon_begin();
        for (i = 0; i < sizeof(rcon); i++) {
                u32 *rki = ctx->key_enc + (i * kwords);
                u32 *rko = rki + kwords;

                rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
                rko[0] = rko[0] ^ rki[0] ^ rcon[i];
                rko[1] = rko[0] ^ rki[1];
                rko[2] = rko[1] ^ rki[2];
                rko[3] = rko[2] ^ rki[3];

                if (key_len == AES_KEYSIZE_192) {
                        if (i >= 7)
                                break;
                        rko[4] = rko[3] ^ rki[4];
                        rko[5] = rko[4] ^ rki[5];
                } else if (key_len == AES_KEYSIZE_256) {
                        if (i >= 6)
                                break;
                        rko[4] = ce_aes_sub(rko[3]) ^ rki[4];
                        rko[5] = rko[4] ^ rki[5];
                        rko[6] = rko[5] ^ rki[6];
                        rko[7] = rko[6] ^ rki[7];
                }
        }

        /*
         * Generate the decryption keys for the Equivalent Inverse Cipher.
         * This involves reversing the order of the round keys, and applying
         * the Inverse Mix Columns transformation on all but the first and
         * the last one.
         */
        key_enc = (struct aes_block *)ctx->key_enc;
        key_dec = (struct aes_block *)ctx->key_dec;
        j = num_rounds(ctx);

        key_dec[0] = key_enc[j];
        for (i = 1, j--; j > 0; i++, j--)
                ce_aes_invert(key_dec + i, key_enc + j);
        key_dec[i] = key_enc[0];

        kernel_neon_end();
        return 0;
}

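/*
 * Illustrative sketch, not part of this kernel file: the index arithmetic
 * of the rcon loop in ce_aes_expandkey() above, with the cryptography
 * stripped out.  It only counts how many 32-bit round-key words get
 * written, to show that every key size ends up with the 4 * (rounds + 1)
 * words the cipher needs.
 */
#include <assert.h>

static unsigned int expanded_words(unsigned int key_len)
{
        unsigned int kwords = key_len / 4;
        unsigned int words = kwords;            /* the key material itself */
        int i;

        for (i = 0; i < 10; i++) {              /* sizeof(rcon) iterations */
                words += 4;                     /* rko[0..3] */
                if (key_len == 24) {            /* AES_KEYSIZE_192 */
                        if (i >= 7)
                                break;
                        words += 2;             /* rko[4..5] */
                } else if (key_len == 32) {     /* AES_KEYSIZE_256 */
                        if (i >= 6)
                                break;
                        words += 4;             /* rko[4..7] */
                }
        }
        return words;
}

int main(void)
{
        assert(expanded_words(16) == 44);       /* 4 * (10 + 1) */
        assert(expanded_words(24) == 52);       /* 4 * (12 + 1) */
        assert(expanded_words(32) == 60);       /* 4 * (14 + 1) */
        return 0;
}
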
static int ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
                         unsigned int key_len)
{
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        int ret;

        ret = ce_aes_expandkey(ctx, in_key, key_len);
        if (!ret)
                return 0;

        crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

struct crypto_aes_xts_ctx {
        struct crypto_aes_ctx key1;
        struct crypto_aes_ctx __aligned(8) key2;
};

static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int ret;

        ret = xts_verify_key(tfm, in_key, key_len);
        if (ret)
                return ret;

        ret = ce_aes_expandkey(&ctx->key1, in_key, key_len / 2);
        if (!ret)
                ret = ce_aes_expandkey(&ctx->key2, &in_key[key_len / 2],
                                       key_len / 2);
        if (!ret)
                return 0;

        crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

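/*
 * Illustrative sketch, not part of this kernel file: xts_set_key() treats
 * the supplied key as two independent AES keys stored back to back, so a
 * 64-byte XTS key yields two 32-byte schedules (key1 for the data, key2
 * for the tweak).  xts_split_key() is a hypothetical helper that only
 * demonstrates the split.
 */
#include <string.h>

static void xts_split_key(const unsigned char *in_key, unsigned int key_len,
                          unsigned char *key1, unsigned char *key2)
{
        memcpy(key1, in_key, key_len / 2);               /* feeds ctx->key1 */
        memcpy(key2, in_key + key_len / 2, key_len / 2); /* feeds ctx->key2 */
}
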
static int ecb_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int blocks;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
                kernel_neon_begin();
                ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                   ctx->key_enc, num_rounds(ctx), blocks);
                kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int blocks;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
                kernel_neon_begin();
                ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                   ctx->key_dec, num_rounds(ctx), blocks);
                kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
}

static int cbc_encrypt_walk(struct skcipher_request *req,
                            struct skcipher_walk *walk)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        unsigned int blocks;
        int err = 0;

        while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
                kernel_neon_begin();
                ce_aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
                                   ctx->key_enc, num_rounds(ctx), blocks,
                                   walk->iv);
                kernel_neon_end();
                err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
        }
        return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, false);
        if (err)
                return err;
        return cbc_encrypt_walk(req, &walk);
}

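/*
 * Illustrative sketch, not part of this kernel file: the per-block work
 * that ce_aes_cbc_encrypt() performs on behalf of cbc_encrypt_walk().
 * Each plaintext block is XORed with the running IV before encryption,
 * and the resulting ciphertext becomes the IV for the next block, so the
 * chaining value carries over between walk steps.  aes_encrypt_block() is
 * a hypothetical single-block primitive standing in for the NEON code.
 */
static void cbc_encrypt_sketch(void (*aes_encrypt_block)(unsigned char out[16],
                                                         const unsigned char in[16]),
                               unsigned char iv[16], const unsigned char *src,
                               unsigned char *dst, unsigned int blocks)
{
        unsigned char buf[16];
        unsigned int i;

        while (blocks--) {
                for (i = 0; i < 16; i++)
                        buf[i] = src[i] ^ iv[i];        /* chain with the IV */
                aes_encrypt_block(dst, buf);
                for (i = 0; i < 16; i++)
                        iv[i] = dst[i];                 /* next block's IV */
                src += 16;
                dst += 16;
        }
}
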
static int cbc_decrypt_walk(struct skcipher_request *req,
                            struct skcipher_walk *walk)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        unsigned int blocks;
        int err = 0;

        while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
                kernel_neon_begin();
                ce_aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
                                   ctx->key_dec, num_rounds(ctx), blocks,
                                   walk->iv);
                kernel_neon_end();
                err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
        }
        return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
        struct skcipher_walk walk;
        int err;

        err = skcipher_walk_virt(&walk, req, false);
        if (err)
                return err;
        return cbc_decrypt_walk(req, &walk);
}

static int cts_cbc_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
        struct scatterlist *src = req->src, *dst = req->dst;
        struct scatterlist sg_src[2], sg_dst[2];
        struct skcipher_request subreq;
        struct skcipher_walk walk;
        int err;

        skcipher_request_set_tfm(&subreq, tfm);
        skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
                                      NULL, NULL);

        if (req->cryptlen <= AES_BLOCK_SIZE) {
                if (req->cryptlen < AES_BLOCK_SIZE)
                        return -EINVAL;
                cbc_blocks = 1;
        }

        if (cbc_blocks > 0) {
                skcipher_request_set_crypt(&subreq, req->src, req->dst,
                                           cbc_blocks * AES_BLOCK_SIZE,
                                           req->iv);

                err = skcipher_walk_virt(&walk, &subreq, false) ?:
                      cbc_encrypt_walk(&subreq, &walk);
                if (err)
                        return err;

                if (req->cryptlen == AES_BLOCK_SIZE)
                        return 0;

                dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
                if (req->dst != req->src)
                        dst = scatterwalk_ffwd(sg_dst, req->dst,
                                               subreq.cryptlen);
        }

        /* handle ciphertext stealing */
        skcipher_request_set_crypt(&subreq, src, dst,
                                   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
                                   req->iv);

        err = skcipher_walk_virt(&walk, &subreq, false);
        if (err)
                return err;

        kernel_neon_begin();
        ce_aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                               ctx->key_enc, num_rounds(ctx), walk.nbytes,
                               walk.iv);
        kernel_neon_end();

        return skcipher_walk_done(&walk, 0);
}

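/*
 * Illustrative sketch, not part of this kernel file: how cts_cbc_encrypt()
 * above (and cts_cbc_decrypt() below) split a request between the plain
 * CBC pass and the final ciphertext-stealing pass.  A 16-byte request is
 * handled by the CBC pass alone; otherwise everything up to the last two
 * blocks goes through the CBC path and the remaining 17..32 bytes are
 * handed to the ce_aes_cbc_cts_* code.
 */
#include <stdio.h>

#define AES_BLOCK_SIZE          16
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int lens[] = { 16, 31, 48, 50 };
        unsigned int i;

        for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
                unsigned int len = lens[i];
                int cbc_blocks = DIV_ROUND_UP(len, AES_BLOCK_SIZE) - 2;
                int cbc_bytes, cts_bytes;

                if (len == AES_BLOCK_SIZE) {
                        cbc_bytes = len;                /* one block: CBC only */
                        cts_bytes = 0;
                } else {
                        cbc_bytes = cbc_blocks > 0 ?
                                    cbc_blocks * AES_BLOCK_SIZE : 0;
                        cts_bytes = len - cbc_bytes;
                }
                printf("cryptlen=%2u: %2d bytes CBC, %2d bytes CTS\n",
                       len, cbc_bytes, cts_bytes);
        }
        return 0;
}
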
static int cts_cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
        struct scatterlist *src = req->src, *dst = req->dst;
        struct scatterlist sg_src[2], sg_dst[2];
        struct skcipher_request subreq;
        struct skcipher_walk walk;
        int err;

        skcipher_request_set_tfm(&subreq, tfm);
        skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
                                      NULL, NULL);

        if (req->cryptlen <= AES_BLOCK_SIZE) {
                if (req->cryptlen < AES_BLOCK_SIZE)
                        return -EINVAL;
                cbc_blocks = 1;
        }

        if (cbc_blocks > 0) {
                skcipher_request_set_crypt(&subreq, req->src, req->dst,
                                           cbc_blocks * AES_BLOCK_SIZE,
                                           req->iv);

                err = skcipher_walk_virt(&walk, &subreq, false) ?:
                      cbc_decrypt_walk(&subreq, &walk);
                if (err)
                        return err;

                if (req->cryptlen == AES_BLOCK_SIZE)
                        return 0;

                dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
                if (req->dst != req->src)
                        dst = scatterwalk_ffwd(sg_dst, req->dst,
                                               subreq.cryptlen);
        }

        /* handle ciphertext stealing */
        skcipher_request_set_crypt(&subreq, src, dst,
                                   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
                                   req->iv);

        err = skcipher_walk_virt(&walk, &subreq, false);
        if (err)
                return err;

        kernel_neon_begin();
        ce_aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                               ctx->key_dec, num_rounds(ctx), walk.nbytes,
                               walk.iv);
        kernel_neon_end();

        return skcipher_walk_done(&walk, 0);
}

static int ctr_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        int err, blocks;

        err = skcipher_walk_virt(&walk, req, false);

        while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
                kernel_neon_begin();
                ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                   ctx->key_enc, num_rounds(ctx), blocks,
                                   walk.iv);
                kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        if (walk.nbytes) {
                u8 __aligned(8) tail[AES_BLOCK_SIZE];
                unsigned int nbytes = walk.nbytes;
                u8 *tdst = walk.dst.virt.addr;
                u8 *tsrc = walk.src.virt.addr;
                /*
                 * Tell ce_aes_ctr_encrypt() to process a tail block.
                 */
                blocks = -1;

                kernel_neon_begin();
                ce_aes_ctr_encrypt(tail, NULL, ctx->key_enc, num_rounds(ctx),
                                   blocks, walk.iv);
                kernel_neon_end();
                crypto_xor_cpy(tdst, tsrc, tail, nbytes);
                err = skcipher_walk_done(&walk, 0);
        }
        return err;
}

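/*
 * Illustrative sketch, not part of this kernel file: what the tail path in
 * ctr_encrypt() above amounts to.  ce_aes_ctr_encrypt() is asked (via
 * blocks == -1) to write one final keystream block into 'tail', and
 * crypto_xor_cpy() then XORs only the remaining nbytes of it into the
 * destination, as this stand-alone helper does.
 */
static void ctr_tail_xor(unsigned char *dst, const unsigned char *src,
                         const unsigned char *keystream, unsigned int nbytes)
{
        unsigned int i;

        for (i = 0; i < nbytes; i++)
                dst[i] = src[i] ^ keystream[i];
}
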
static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        unsigned long flags;

        /*
         * Temporarily disable interrupts to avoid races where
         * cachelines are evicted when the CPU is interrupted
         * to do something else.
         */
        local_irq_save(flags);
        aes_encrypt(ctx, dst, src);
        local_irq_restore(flags);
}

static int ctr_encrypt_sync(struct skcipher_request *req)
{
        if (!crypto_simd_usable())
                return crypto_ctr_encrypt_walk(req, ctr_encrypt_one);

        return ctr_encrypt(req);
}

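/*
 * Illustrative sketch, not part of this kernel file: the CTR construction
 * that crypto_ctr_encrypt_walk() builds around ctr_encrypt_one() when the
 * NEON unit is not usable.  aes_encrypt_block() is a hypothetical
 * single-block primitive standing in for the library aes_encrypt() call.
 */
static void ctr_walk_sketch(void (*aes_encrypt_block)(unsigned char out[16],
                                                      const unsigned char in[16]),
                            unsigned char ctr[16], const unsigned char *src,
                            unsigned char *dst, unsigned int len)
{
        unsigned char keystream[16];
        unsigned int n, i;
        int j;

        while (len) {
                n = len < 16 ? len : 16;

                aes_encrypt_block(keystream, ctr);      /* E_K(counter) */
                for (i = 0; i < n; i++)
                        dst[i] = src[i] ^ keystream[i];

                for (j = 15; j >= 0; j--)               /* big-endian +1 */
                        if (++ctr[j])
                                break;

                src += n;
                dst += n;
                len -= n;
        }
}
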
static int xts_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err, first, rounds = num_rounds(&ctx->key1);
        int tail = req->cryptlen % AES_BLOCK_SIZE;
        struct scatterlist sg_src[2], sg_dst[2];
        struct skcipher_request subreq;
        struct scatterlist *src, *dst;
        struct skcipher_walk walk;

        if (req->cryptlen < AES_BLOCK_SIZE)
                return -EINVAL;

        err = skcipher_walk_virt(&walk, req, false);

        if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
                int xts_blocks = DIV_ROUND_UP(req->cryptlen,
                                              AES_BLOCK_SIZE) - 2;

                skcipher_walk_abort(&walk);

                skcipher_request_set_tfm(&subreq, tfm);
                skcipher_request_set_callback(&subreq,
                                              skcipher_request_flags(req),
                                              NULL, NULL);
                skcipher_request_set_crypt(&subreq, req->src, req->dst,
                                           xts_blocks * AES_BLOCK_SIZE,
                                           req->iv);
                req = &subreq;
                err = skcipher_walk_virt(&walk, req, false);
        } else {
                tail = 0;
        }

        for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
                int nbytes = walk.nbytes;

                if (walk.nbytes < walk.total)
                        nbytes &= ~(AES_BLOCK_SIZE - 1);

                kernel_neon_begin();
                ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                   ctx->key1.key_enc, rounds, nbytes, walk.iv,
                                   ctx->key2.key_enc, first);
                kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
        }

        if (err || likely(!tail))
                return err;

        dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
        if (req->dst != req->src)
                dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

        skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
                                   req->iv);

        err = skcipher_walk_virt(&walk, req, false);
        if (err)
                return err;

        kernel_neon_begin();
        ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                           ctx->key1.key_enc, rounds, walk.nbytes, walk.iv,
                           ctx->key2.key_enc, first);
        kernel_neon_end();

        return skcipher_walk_done(&walk, 0);
}

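/*
 * Illustrative sketch, not part of this kernel file: the length split used
 * by xts_encrypt() above (and xts_decrypt() below) when cryptlen is not a
 * whole number of blocks.  The main loop is limited to
 * DIV_ROUND_UP(cryptlen, 16) - 2 blocks, so the final ciphertext-stealing
 * pass always sees exactly one full block plus the tail.
 */
#include <assert.h>

#define AES_BLOCK_SIZE          16
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int cryptlen = 50;             /* 3 full blocks + 2 bytes */
        int tail = cryptlen % AES_BLOCK_SIZE;
        int xts_blocks = DIV_ROUND_UP(cryptlen, AES_BLOCK_SIZE) - 2;

        assert(tail == 2);
        assert(xts_blocks == 2);                /* main pass: 32 bytes */
        assert(xts_blocks * AES_BLOCK_SIZE +
               AES_BLOCK_SIZE + tail == cryptlen); /* final pass: 18 bytes */
        return 0;
}
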
static int xts_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err, first, rounds = num_rounds(&ctx->key1);
        int tail = req->cryptlen % AES_BLOCK_SIZE;
        struct scatterlist sg_src[2], sg_dst[2];
        struct skcipher_request subreq;
        struct scatterlist *src, *dst;
        struct skcipher_walk walk;

        if (req->cryptlen < AES_BLOCK_SIZE)
                return -EINVAL;

        err = skcipher_walk_virt(&walk, req, false);

        if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
                int xts_blocks = DIV_ROUND_UP(req->cryptlen,
                                              AES_BLOCK_SIZE) - 2;

                skcipher_walk_abort(&walk);

                skcipher_request_set_tfm(&subreq, tfm);
                skcipher_request_set_callback(&subreq,
                                              skcipher_request_flags(req),
                                              NULL, NULL);
                skcipher_request_set_crypt(&subreq, req->src, req->dst,
                                           xts_blocks * AES_BLOCK_SIZE,
                                           req->iv);
                req = &subreq;
                err = skcipher_walk_virt(&walk, req, false);
        } else {
                tail = 0;
        }

        for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
                int nbytes = walk.nbytes;

                if (walk.nbytes < walk.total)
                        nbytes &= ~(AES_BLOCK_SIZE - 1);

                kernel_neon_begin();
                ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                   ctx->key1.key_dec, rounds, nbytes, walk.iv,
                                   ctx->key2.key_enc, first);
                kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
        }

        if (err || likely(!tail))
                return err;

        dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
        if (req->dst != req->src)
                dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

        skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
                                   req->iv);

        err = skcipher_walk_virt(&walk, req, false);
        if (err)
                return err;

        kernel_neon_begin();
        ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                           ctx->key1.key_dec, rounds, walk.nbytes, walk.iv,
                           ctx->key2.key_enc, first);
        kernel_neon_end();

        return skcipher_walk_done(&walk, 0);
}

static struct skcipher_alg aes_algs[] = { {
        .base.cra_name          = "__ecb(aes)",
        .base.cra_driver_name   = "__ecb-aes-ce",
        .base.cra_priority      = 300,
        .base.cra_flags         = CRYPTO_ALG_INTERNAL,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct crypto_aes_ctx),
        .base.cra_module        = THIS_MODULE,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .setkey                 = ce_aes_setkey,
        .encrypt                = ecb_encrypt,
        .decrypt                = ecb_decrypt,
}, {
        .base.cra_name          = "__cbc(aes)",
        .base.cra_driver_name   = "__cbc-aes-ce",
        .base.cra_priority      = 300,
        .base.cra_flags         = CRYPTO_ALG_INTERNAL,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct crypto_aes_ctx),
        .base.cra_module        = THIS_MODULE,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = ce_aes_setkey,
        .encrypt                = cbc_encrypt,
        .decrypt                = cbc_decrypt,
}, {
        .base.cra_name          = "__cts(cbc(aes))",
        .base.cra_driver_name   = "__cts-cbc-aes-ce",
        .base.cra_priority      = 300,
        .base.cra_flags         = CRYPTO_ALG_INTERNAL,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct crypto_aes_ctx),
        .base.cra_module        = THIS_MODULE,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .walksize               = 2 * AES_BLOCK_SIZE,
        .setkey                 = ce_aes_setkey,
        .encrypt                = cts_cbc_encrypt,
        .decrypt                = cts_cbc_decrypt,
}, {
        .base.cra_name          = "__ctr(aes)",
        .base.cra_driver_name   = "__ctr-aes-ce",
        .base.cra_priority      = 300,
        .base.cra_flags         = CRYPTO_ALG_INTERNAL,
        .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct crypto_aes_ctx),
        .base.cra_module        = THIS_MODULE,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .chunksize              = AES_BLOCK_SIZE,
        .setkey                 = ce_aes_setkey,
        .encrypt                = ctr_encrypt,
        .decrypt                = ctr_encrypt,
}, {
        .base.cra_name          = "ctr(aes)",
        .base.cra_driver_name   = "ctr-aes-ce-sync",
        .base.cra_priority      = 300 - 1,
        .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct crypto_aes_ctx),
        .base.cra_module        = THIS_MODULE,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .chunksize              = AES_BLOCK_SIZE,
        .setkey                 = ce_aes_setkey,
        .encrypt                = ctr_encrypt_sync,
        .decrypt                = ctr_encrypt_sync,
}, {
        .base.cra_name          = "__xts(aes)",
        .base.cra_driver_name   = "__xts-aes-ce",
        .base.cra_priority      = 300,
        .base.cra_flags         = CRYPTO_ALG_INTERNAL,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct crypto_aes_xts_ctx),
        .base.cra_module        = THIS_MODULE,

        .min_keysize            = 2 * AES_MIN_KEY_SIZE,
        .max_keysize            = 2 * AES_MAX_KEY_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .walksize               = 2 * AES_BLOCK_SIZE,
        .setkey                 = xts_set_key,
        .encrypt                = xts_encrypt,
        .decrypt                = xts_decrypt,
} };

static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];

static void aes_exit(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++)
                simd_skcipher_free(aes_simd_algs[i]);

        crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
        struct simd_skcipher_alg *simd;
        const char *basename;
        const char *algname;
        const char *drvname;
        int err;
        int i;

        err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
                        continue;

                algname = aes_algs[i].base.cra_name + 2;
                drvname = aes_algs[i].base.cra_driver_name + 2;
                basename = aes_algs[i].base.cra_driver_name;
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
                        goto unregister_simds;

                aes_simd_algs[i] = simd;
        }

        return 0;

unregister_simds:
        aes_exit();
        return err;
}

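/*
 * Illustrative sketch, not part of this kernel file: the name mangling in
 * aes_init() above.  Each CRYPTO_ALG_INTERNAL algorithm is registered with
 * a "__" prefix, and the simd wrapper created for it simply drops that
 * prefix again while pointing back at the internal implementation.
 */
#include <stdio.h>

int main(void)
{
        const char *cra_name = "__cbc(aes)";
        const char *cra_driver_name = "__cbc-aes-ce";

        printf("wrapper algorithm name: %s\n", cra_name + 2);        /* cbc(aes) */
        printf("wrapper driver name   : %s\n", cra_driver_name + 2); /* cbc-aes-ce */
        printf("wrapped (base) driver : %s\n", cra_driver_name);     /* __cbc-aes-ce */
        return 0;
}
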
module_cpu_feature_match(AES, aes_init);
module_exit(aes_exit);
