arch/arm64/crypto/ghash-ce-glue.c

DEFINITIONS

This source file includes the following definitions:
  1. ghash_init
  2. ghash_do_update
  3. __ghash_update
  4. ghash_update_p8
  5. ghash_update_p64
  6. ghash_final_p8
  7. ghash_final_p64
  8. ghash_reflect
  9. __ghash_setkey
  10. ghash_setkey
  11. num_rounds
  12. gcm_setkey
  13. gcm_setauthsize
  14. gcm_update_mac
  15. gcm_calculate_auth_mac
  16. gcm_final
  17. gcm_encrypt
  18. gcm_decrypt
  19. ghash_ce_mod_init
  20. ghash_ce_mod_exit

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Accelerated GHASH implementation with ARMv8 PMULL instructions.
 *
 * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>

MODULE_DESCRIPTION("GHASH and AES-GCM using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ghash");

#define GHASH_BLOCK_SIZE        16
#define GHASH_DIGEST_SIZE       16
#define GCM_IV_SIZE             12

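/*
 * The key struct holds the first four powers of the hash key H, each
 * pre-shifted by ghash_reflect() into the format the PMULL asm code
 * expects, so that several blocks can be aggregated per reduction.
 * The raw key 'k' is retained for the non-SIMD gf128mul fallback.
 */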
struct ghash_key {
        u64                     h[2];
        u64                     h2[2];
        u64                     h3[2];
        u64                     h4[2];

        be128                   k;
};

struct ghash_desc_ctx {
        u64 digest[GHASH_DIGEST_SIZE/sizeof(u64)];
        u8 buf[GHASH_BLOCK_SIZE];
        u32 count;
};

struct gcm_aes_ctx {
        struct crypto_aes_ctx   aes_key;
        struct ghash_key        ghash_key;
};

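/*
 * NEON/PMULL primitives, implemented in assembler (ghash-ce-core.S).
 * 'head' may point to a single buffered block to be hashed ahead of
 * 'src'.
 */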
asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
                                       struct ghash_key const *k,
                                       const char *head);

asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
                                      struct ghash_key const *k,
                                      const char *head);

asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[],
                                  const u8 src[], struct ghash_key const *k,
                                  u8 ctr[], u32 const rk[], int rounds,
                                  u8 ks[]);

asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[],
                                  const u8 src[], struct ghash_key const *k,
                                  u8 ctr[], u32 const rk[], int rounds);

asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[],
                                        u32 const rk[], int rounds);

static int ghash_init(struct shash_desc *desc)
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);

        *ctx = (struct ghash_desc_ctx){};
        return 0;
}

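/*
 * Hash 'blocks' full blocks: use the NEON routine inside a
 * kernel_neon_begin()/kernel_neon_end() section when SIMD is usable,
 * or fall back to the generic gf128mul_lle() otherwise. A buffered
 * 'head' block, if present, is consumed first.
 */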
static void ghash_do_update(int blocks, u64 dg[], const char *src,
                            struct ghash_key *key, const char *head,
                            void (*simd_update)(int blocks, u64 dg[],
                                                const char *src,
                                                struct ghash_key const *k,
                                                const char *head))
{
        if (likely(crypto_simd_usable())) {
                kernel_neon_begin();
                simd_update(blocks, dg, src, key, head);
                kernel_neon_end();
        } else {
                be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };

                do {
                        const u8 *in = src;

                        if (head) {
                                in = head;
                                blocks++;
                                head = NULL;
                        } else {
                                src += GHASH_BLOCK_SIZE;
                        }

                        crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
                        gf128mul_lle(&dst, &key->k);
                } while (--blocks);

                dg[0] = be64_to_cpu(dst.b);
                dg[1] = be64_to_cpu(dst.a);
        }
}

/* avoid hogging the CPU for too long */
#define MAX_BLOCKS      (SZ_64K / GHASH_BLOCK_SIZE)

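/*
 * Buffer the input until a full block is available, then hash whole
 * blocks, at most MAX_BLOCKS per ghash_do_update() call so the NEON
 * unit is not monopolized for too long. Leftover bytes are kept in
 * ctx->buf for the next update or the final call.
 */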
static int __ghash_update(struct shash_desc *desc, const u8 *src,
                          unsigned int len,
                          void (*simd_update)(int blocks, u64 dg[],
                                              const char *src,
                                              struct ghash_key const *k,
                                              const char *head))
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
        unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

        ctx->count += len;

        if ((partial + len) >= GHASH_BLOCK_SIZE) {
                struct ghash_key *key = crypto_shash_ctx(desc->tfm);
                int blocks;

                if (partial) {
                        int p = GHASH_BLOCK_SIZE - partial;

                        memcpy(ctx->buf + partial, src, p);
                        src += p;
                        len -= p;
                }

                blocks = len / GHASH_BLOCK_SIZE;
                len %= GHASH_BLOCK_SIZE;

                do {
                        int chunk = min(blocks, MAX_BLOCKS);

                        ghash_do_update(chunk, ctx->digest, src, key,
                                        partial ? ctx->buf : NULL,
                                        simd_update);

                        blocks -= chunk;
                        src += chunk * GHASH_BLOCK_SIZE;
                        partial = 0;
                } while (unlikely(blocks > 0));
        }
        if (len)
                memcpy(ctx->buf + partial, src, len);
        return 0;
}

static int ghash_update_p8(struct shash_desc *desc, const u8 *src,
                           unsigned int len)
{
        return __ghash_update(desc, src, len, pmull_ghash_update_p8);
}

static int ghash_update_p64(struct shash_desc *desc, const u8 *src,
                            unsigned int len)
{
        return __ghash_update(desc, src, len, pmull_ghash_update_p64);
}

static int ghash_final_p8(struct shash_desc *desc, u8 *dst)
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
        unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

        if (partial) {
                struct ghash_key *key = crypto_shash_ctx(desc->tfm);

                memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);

                ghash_do_update(1, ctx->digest, ctx->buf, key, NULL,
                                pmull_ghash_update_p8);
        }
        put_unaligned_be64(ctx->digest[1], dst);
        put_unaligned_be64(ctx->digest[0], dst + 8);

        *ctx = (struct ghash_desc_ctx){};
        return 0;
}

static int ghash_final_p64(struct shash_desc *desc, u8 *dst)
{
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
        unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

        if (partial) {
                struct ghash_key *key = crypto_shash_ctx(desc->tfm);

                memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);

                ghash_do_update(1, ctx->digest, ctx->buf, key, NULL,
                                pmull_ghash_update_p64);
        }
        put_unaligned_be64(ctx->digest[1], dst);
        put_unaligned_be64(ctx->digest[0], dst + 8);

        *ctx = (struct ghash_desc_ctx){};
        return 0;
}

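/*
 * Convert the hash key into the format used by the PMULL code: the
 * 128-bit value is multiplied by x, i.e. rotated left by one bit, with
 * a set top bit reduced back in via 0xc200000000000000, the
 * bit-reflected form of the GHASH polynomial x^128 + x^7 + x^2 + x + 1.
 */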
static void ghash_reflect(u64 h[], const be128 *k)
{
        u64 carry = be64_to_cpu(k->a) & BIT(63) ? 1 : 0;

        h[0] = (be64_to_cpu(k->b) << 1) | carry;
        h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63);

        if (carry)
                h[1] ^= 0xc200000000000000UL;
}

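/*
 * Compute H, H^2, H^3 and H^4 (one gf128mul_lle() per extra power) and
 * store each in the reflected format used by the PMULL code.
 */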
static int __ghash_setkey(struct ghash_key *key,
                          const u8 *inkey, unsigned int keylen)
{
        be128 h;

        /* needed for the fallback */
        memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);

        ghash_reflect(key->h, &key->k);

        h = key->k;
        gf128mul_lle(&h, &key->k);
        ghash_reflect(key->h2, &h);

        gf128mul_lle(&h, &key->k);
        ghash_reflect(key->h3, &h);

        gf128mul_lle(&h, &key->k);
        ghash_reflect(key->h4, &h);

        return 0;
}

static int ghash_setkey(struct crypto_shash *tfm,
                        const u8 *inkey, unsigned int keylen)
{
        struct ghash_key *key = crypto_shash_ctx(tfm);

        if (keylen != GHASH_BLOCK_SIZE) {
                crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        return __ghash_setkey(key, inkey, keylen);
}

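/*
 * Two GHASH implementations are exposed: ghash-neon uses only 8-bit
 * polynomial multiplies, available on all ASIMD-capable CPUs, while
 * ghash-ce uses the 64-bit PMULL instruction from the optional Crypto
 * Extensions and is registered at a higher priority.
 */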
static struct shash_alg ghash_alg[] = {{
        .base.cra_name          = "ghash",
        .base.cra_driver_name   = "ghash-neon",
        .base.cra_priority      = 150,
        .base.cra_blocksize     = GHASH_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct ghash_key),
        .base.cra_module        = THIS_MODULE,

        .digestsize             = GHASH_DIGEST_SIZE,
        .init                   = ghash_init,
        .update                 = ghash_update_p8,
        .final                  = ghash_final_p8,
        .setkey                 = ghash_setkey,
        .descsize               = sizeof(struct ghash_desc_ctx),
}, {
        .base.cra_name          = "ghash",
        .base.cra_driver_name   = "ghash-ce",
        .base.cra_priority      = 200,
        .base.cra_blocksize     = GHASH_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct ghash_key),
        .base.cra_module        = THIS_MODULE,

        .digestsize             = GHASH_DIGEST_SIZE,
        .init                   = ghash_init,
        .update                 = ghash_update_p64,
        .final                  = ghash_final_p64,
        .setkey                 = ghash_setkey,
        .descsize               = sizeof(struct ghash_desc_ctx),
}};

static int num_rounds(struct crypto_aes_ctx *ctx)
{
        /*
         * # of rounds specified by AES:
         * 128 bit key          10 rounds
         * 192 bit key          12 rounds
         * 256 bit key          14 rounds
         * => n byte key        => 6 + (n/4) rounds
         */
        return 6 + ctx->key_length / 4;
}

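/*
 * Expand the AES key, then derive the GHASH key as specified for GCM,
 * by encrypting an all-zero block: H = E(K, 0^128).
 */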
static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
                      unsigned int keylen)
{
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm);
        u8 key[GHASH_BLOCK_SIZE];
        int ret;

        ret = aes_expandkey(&ctx->aes_key, inkey, keylen);
        if (ret) {
                tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){});

        return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
}

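/* GCM allows full 96..128-bit tags as well as truncated 64- and 32-bit ones */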
static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 8:
        case 12 ... 16:
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

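/*
 * Fold 'count' bytes of associated data into the GHASH state. Partial
 * blocks are buffered in 'buf' across calls so that only whole blocks
 * are ever passed to ghash_do_update().
 */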
static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
                           int *buf_count, struct gcm_aes_ctx *ctx)
{
        if (*buf_count > 0) {
                int buf_added = min(count, GHASH_BLOCK_SIZE - *buf_count);

                memcpy(&buf[*buf_count], src, buf_added);

                *buf_count += buf_added;
                src += buf_added;
                count -= buf_added;
        }

        if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) {
                int blocks = count / GHASH_BLOCK_SIZE;

                ghash_do_update(blocks, dg, src, &ctx->ghash_key,
                                *buf_count ? buf : NULL,
                                pmull_ghash_update_p64);

                src += blocks * GHASH_BLOCK_SIZE;
                count %= GHASH_BLOCK_SIZE;
                *buf_count = 0;
        }

        if (count > 0) {
                memcpy(buf, src, count);
                *buf_count = count;
        }
}

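/*
 * Hash the associated data, walking the source scatterlist one mapped
 * chunk at a time and zero-padding the final partial block, as GCM
 * requires.
 */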
static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
        u8 buf[GHASH_BLOCK_SIZE];
        struct scatter_walk walk;
        u32 len = req->assoclen;
        int buf_count = 0;

        scatterwalk_start(&walk, req->src);

        do {
                u32 n = scatterwalk_clamp(&walk, len);
                u8 *p;

                if (!n) {
                        scatterwalk_start(&walk, sg_next(walk.sg));
                        n = scatterwalk_clamp(&walk, len);
                }
                p = scatterwalk_map(&walk);

                gcm_update_mac(dg, p, n, buf, &buf_count, ctx);
                len -= n;

                scatterwalk_unmap(p);
                scatterwalk_advance(&walk, n);
                scatterwalk_done(&walk, 0, len);
        } while (len);

        if (buf_count) {
                memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count);
                ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL,
                                pmull_ghash_update_p64);
        }
}

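/*
 * Finish the tag: hash the block containing the bit lengths of the
 * associated data and the ciphertext, then XOR the resulting GHASH
 * value into 'tag', which already contains the encrypted initial
 * counter block.
 */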
static void gcm_final(struct aead_request *req, struct gcm_aes_ctx *ctx,
                      u64 dg[], u8 tag[], int cryptlen)
{
        u8 mac[AES_BLOCK_SIZE];
        u128 lengths;

        lengths.a = cpu_to_be64(req->assoclen * 8);
        lengths.b = cpu_to_be64(cryptlen * 8);

        ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL,
                        pmull_ghash_update_p64);

        put_unaligned_be64(dg[1], mac);
        put_unaligned_be64(dg[0], mac + 8);

        crypto_xor(tag, mac, AES_BLOCK_SIZE);
}

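/*
 * GCM encryption: encrypt the plaintext in CTR mode and GHASH the
 * resulting ciphertext. When SIMD is usable and there are at least two
 * blocks of input, the NEON code handles the bulk of the data, with the
 * NEON unit yielded between scatterlist steps ('rk' is NULL on the
 * first iteration, which runs in the same kernel_neon_begin() section
 * as the setup blocks). Otherwise a scalar AES-CTR fallback is used.
 * 'ks' carries keystream for the final partial blocks.
 */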
static int gcm_encrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
        struct skcipher_walk walk;
        u8 iv[AES_BLOCK_SIZE];
        u8 ks[2 * AES_BLOCK_SIZE];
        u8 tag[AES_BLOCK_SIZE];
        u64 dg[2] = {};
        int nrounds = num_rounds(&ctx->aes_key);
        int err;

        if (req->assoclen)
                gcm_calculate_auth_mac(req, dg);

        memcpy(iv, req->iv, GCM_IV_SIZE);
        put_unaligned_be32(1, iv + GCM_IV_SIZE);

        err = skcipher_walk_aead_encrypt(&walk, req, false);

        if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
                u32 const *rk = NULL;

                kernel_neon_begin();
                pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);
                pmull_gcm_encrypt_block(ks, iv, NULL, nrounds);
                put_unaligned_be32(3, iv + GCM_IV_SIZE);
                pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds);
                put_unaligned_be32(4, iv + GCM_IV_SIZE);

                do {
                        int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;

                        if (rk)
                                kernel_neon_begin();

                        pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
                                          walk.src.virt.addr, &ctx->ghash_key,
                                          iv, rk, nrounds, ks);
                        kernel_neon_end();

                        err = skcipher_walk_done(&walk,
                                        walk.nbytes % (2 * AES_BLOCK_SIZE));

                        rk = ctx->aes_key.key_enc;
                } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
        } else {
                aes_encrypt(&ctx->aes_key, tag, iv);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);

                while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
                        const int blocks =
                                walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
                        u8 *dst = walk.dst.virt.addr;
                        u8 *src = walk.src.virt.addr;
                        int remaining = blocks;

                        do {
                                aes_encrypt(&ctx->aes_key, ks, iv);
                                crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
                                crypto_inc(iv, AES_BLOCK_SIZE);

                                dst += AES_BLOCK_SIZE;
                                src += AES_BLOCK_SIZE;
                        } while (--remaining > 0);

                        ghash_do_update(blocks, dg,
                                        walk.dst.virt.addr, &ctx->ghash_key,
                                        NULL, pmull_ghash_update_p64);

                        err = skcipher_walk_done(&walk,
                                                 walk.nbytes % (2 * AES_BLOCK_SIZE));
                }
                if (walk.nbytes) {
                        aes_encrypt(&ctx->aes_key, ks, iv);
                        if (walk.nbytes > AES_BLOCK_SIZE) {
                                crypto_inc(iv, AES_BLOCK_SIZE);
                                aes_encrypt(&ctx->aes_key, ks + AES_BLOCK_SIZE, iv);
                        }
                }
        }

        /* handle the tail */
        if (walk.nbytes) {
                u8 buf[GHASH_BLOCK_SIZE];
                unsigned int nbytes = walk.nbytes;
                u8 *dst = walk.dst.virt.addr;
                u8 *head = NULL;

                crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, ks,
                               walk.nbytes);

                if (walk.nbytes > GHASH_BLOCK_SIZE) {
                        head = dst;
                        dst += GHASH_BLOCK_SIZE;
                        nbytes %= GHASH_BLOCK_SIZE;
                }

                memcpy(buf, dst, nbytes);
                memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
                ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head,
                                pmull_ghash_update_p64);

                err = skcipher_walk_done(&walk, 0);
        }

        if (err)
                return err;

        gcm_final(req, ctx, dg, tag, req->cryptlen);

        /* copy authtag to end of dst */
        scatterwalk_map_and_copy(tag, req->dst, req->assoclen + req->cryptlen,
                                 crypto_aead_authsize(aead), 1);

        return 0;
}

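/*
 * GCM decryption mirrors gcm_encrypt(), except that GHASH is applied to
 * the ciphertext before it is decrypted, and the keystream for the tail
 * is generated into iv/iv2 during the last pass through the NEON
 * section. The computed tag is compared against the one appended to the
 * source data using crypto_memneq().
 */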
static int gcm_decrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
        unsigned int authsize = crypto_aead_authsize(aead);
        struct skcipher_walk walk;
        u8 iv[2 * AES_BLOCK_SIZE];
        u8 tag[AES_BLOCK_SIZE];
        u8 buf[2 * GHASH_BLOCK_SIZE];
        u64 dg[2] = {};
        int nrounds = num_rounds(&ctx->aes_key);
        int err;

        if (req->assoclen)
                gcm_calculate_auth_mac(req, dg);

        memcpy(iv, req->iv, GCM_IV_SIZE);
        put_unaligned_be32(1, iv + GCM_IV_SIZE);

        err = skcipher_walk_aead_decrypt(&walk, req, false);

        if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
                u32 const *rk = NULL;

                kernel_neon_begin();
                pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);

                do {
                        int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
                        int rem = walk.total - blocks * AES_BLOCK_SIZE;

                        if (rk)
                                kernel_neon_begin();

                        pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
                                          walk.src.virt.addr, &ctx->ghash_key,
                                          iv, rk, nrounds);

                        /* check if this is the final iteration of the loop */
                        if (rem < (2 * AES_BLOCK_SIZE)) {
                                u8 *iv2 = iv + AES_BLOCK_SIZE;

                                if (rem > AES_BLOCK_SIZE) {
                                        memcpy(iv2, iv, AES_BLOCK_SIZE);
                                        crypto_inc(iv2, AES_BLOCK_SIZE);
                                }

                                pmull_gcm_encrypt_block(iv, iv, NULL, nrounds);

                                if (rem > AES_BLOCK_SIZE)
                                        pmull_gcm_encrypt_block(iv2, iv2, NULL,
                                                                nrounds);
                        }

                        kernel_neon_end();

                        err = skcipher_walk_done(&walk,
                                        walk.nbytes % (2 * AES_BLOCK_SIZE));

                        rk = ctx->aes_key.key_enc;
                } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
        } else {
                aes_encrypt(&ctx->aes_key, tag, iv);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);

                while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
                        int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
                        u8 *dst = walk.dst.virt.addr;
                        u8 *src = walk.src.virt.addr;

                        ghash_do_update(blocks, dg, walk.src.virt.addr,
                                        &ctx->ghash_key, NULL,
                                        pmull_ghash_update_p64);

                        do {
                                aes_encrypt(&ctx->aes_key, buf, iv);
                                crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
                                crypto_inc(iv, AES_BLOCK_SIZE);

                                dst += AES_BLOCK_SIZE;
                                src += AES_BLOCK_SIZE;
                        } while (--blocks > 0);

                        err = skcipher_walk_done(&walk,
                                                 walk.nbytes % (2 * AES_BLOCK_SIZE));
                }
                if (walk.nbytes) {
                        if (walk.nbytes > AES_BLOCK_SIZE) {
                                u8 *iv2 = iv + AES_BLOCK_SIZE;

                                memcpy(iv2, iv, AES_BLOCK_SIZE);
                                crypto_inc(iv2, AES_BLOCK_SIZE);

                                aes_encrypt(&ctx->aes_key, iv2, iv2);
                        }
                        aes_encrypt(&ctx->aes_key, iv, iv);
                }
        }

        /* handle the tail */
        if (walk.nbytes) {
                const u8 *src = walk.src.virt.addr;
                const u8 *head = NULL;
                unsigned int nbytes = walk.nbytes;

                if (walk.nbytes > GHASH_BLOCK_SIZE) {
                        head = src;
                        src += GHASH_BLOCK_SIZE;
                        nbytes %= GHASH_BLOCK_SIZE;
                }

                memcpy(buf, src, nbytes);
                memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes);
                ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head,
                                pmull_ghash_update_p64);

                crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv,
                               walk.nbytes);

                err = skcipher_walk_done(&walk, 0);
        }

        if (err)
                return err;

        gcm_final(req, ctx, dg, tag, req->cryptlen - authsize);

        /* compare calculated auth tag with the stored one */
        scatterwalk_map_and_copy(buf, req->src,
                                 req->assoclen + req->cryptlen - authsize,
                                 authsize, 0);

        if (crypto_memneq(tag, buf, authsize))
                return -EBADMSG;
        return 0;
}

static struct aead_alg gcm_aes_alg = {
        .ivsize                 = GCM_IV_SIZE,
        .chunksize              = 2 * AES_BLOCK_SIZE,
        .maxauthsize            = AES_BLOCK_SIZE,
        .setkey                 = gcm_setkey,
        .setauthsize            = gcm_setauthsize,
        .encrypt                = gcm_encrypt,
        .decrypt                = gcm_decrypt,

        .base.cra_name          = "gcm(aes)",
        .base.cra_driver_name   = "gcm-aes-ce",
        .base.cra_priority      = 300,
        .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct gcm_aes_ctx),
        .base.cra_module        = THIS_MODULE,
};

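/*
 * ASIMD is required for the baseline implementation. With the PMULL
 * Crypto Extension, both GHASH variants and the GCM AEAD are
 * registered; without it, only the first (NEON-only) shash is.
 */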
static int __init ghash_ce_mod_init(void)
{
        int ret;

        if (!cpu_have_named_feature(ASIMD))
                return -ENODEV;

        if (cpu_have_named_feature(PMULL))
                ret = crypto_register_shashes(ghash_alg,
                                              ARRAY_SIZE(ghash_alg));
        else
                /* only register the first array element */
                ret = crypto_register_shash(ghash_alg);

        if (ret)
                return ret;

        if (cpu_have_named_feature(PMULL)) {
                ret = crypto_register_aead(&gcm_aes_alg);
                if (ret)
                        crypto_unregister_shashes(ghash_alg,
                                                  ARRAY_SIZE(ghash_alg));
        }
        return ret;
}

static void __exit ghash_ce_mod_exit(void)
{
        if (cpu_have_named_feature(PMULL))
                crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg));
        else
                crypto_unregister_shash(ghash_alg);
        crypto_unregister_aead(&gcm_aes_alg);
}

static const struct cpu_feature ghash_cpu_feature[] = {
        { cpu_feature(PMULL) }, { }
};
MODULE_DEVICE_TABLE(cpu, ghash_cpu_feature);

module_init(ghash_ce_mod_init);
module_exit(ghash_ce_mod_exit);
