crypto/blkcipher.c


DEFINITIONS

This source file includes the following definitions:
  1. blkcipher_map_src
  2. blkcipher_map_dst
  3. blkcipher_unmap_src
  4. blkcipher_unmap_dst
  5. blkcipher_get_spot
  6. blkcipher_done_slow
  7. blkcipher_done_fast
  8. blkcipher_walk_done
  9. blkcipher_next_slow
  10. blkcipher_next_copy
  11. blkcipher_next_fast
  12. blkcipher_walk_next
  13. blkcipher_copy_iv
  14. blkcipher_walk_virt
  15. blkcipher_walk_phys
  16. blkcipher_walk_first
  17. blkcipher_walk_virt_block
  18. blkcipher_aead_walk_virt_block
  19. setkey_unaligned
  20. setkey
  21. async_setkey
  22. async_encrypt
  23. async_decrypt
  24. crypto_blkcipher_ctxsize
  25. crypto_init_blkcipher_ops_async
  26. crypto_init_blkcipher_ops_sync
  27. crypto_init_blkcipher_ops
  28. crypto_blkcipher_report
  29. crypto_blkcipher_report
  30. crypto_blkcipher_show

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

enum {
        BLKCIPHER_WALK_PHYS = 1 << 0,
        BLKCIPHER_WALK_SLOW = 1 << 1,
        BLKCIPHER_WALK_COPY = 1 << 2,
        BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk);

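/*
 * Map the scatterlist entry the walk currently points at so the cipher
 * can read (src) or write (dst) it through a kernel virtual address.
 * The matching unmap helpers release that mapping again.
 */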
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
        walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
        walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
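/*
 * If [start, start + len) already fits within one page, end_page lies at or
 * before start and start is returned unchanged; if the span would straddle
 * a page boundary, the start of the following page is returned instead.
 * Worked example (illustrative, assuming 4 KiB pages): start = 0x1ff8 and
 * len = 16 put the last byte at 0x2007, so end_page = 0x2000 > start and
 * the spot moves to 0x2000.
 */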
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
        return max(start, end_page);
}

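/*
 * Slow-path completion: the block was processed in the aligned bounce
 * buffer, so copy the result from there out to the destination scatterlist.
 */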
static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
                                       unsigned int bsize)
{
        u8 *addr;

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        addr = blkcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize, 1);
}

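/*
 * Fast-path completion: flush the copy buffer back to the destination
 * (COPY), or drop the kernel mappings (in-place or DIFF), then advance
 * both scatterlist walks past the n bytes just processed.
 */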
static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
                                       unsigned int n)
{
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                blkcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                blkcipher_unmap_dst(walk);
        } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
                if (walk->flags & BLKCIPHER_WALK_DIFF)
                        blkcipher_unmap_dst(walk);
                blkcipher_unmap_src(walk);
        }

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
}

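/*
 * Finish the current step of the walk.  On entry, err is either a negative
 * errno or the number of bytes at the tail of walk->nbytes that the caller
 * left unprocessed (callers typically pass nbytes % blocksize).  Returns a
 * negative errno, or 0 with walk->nbytes set to the size of the next chunk
 * (0 once the walk is complete).
 */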
int blkcipher_walk_done(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk, int err)
{
        unsigned int n; /* bytes processed */
        bool more;

        if (unlikely(err < 0))
                goto finish;

        n = walk->nbytes - err;
        walk->total -= n;
        more = (walk->total != 0);

        if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
                blkcipher_done_fast(walk, n);
        } else {
                if (WARN_ON(err)) {
                        /* unexpected case; didn't process all bytes */
                        err = -EINVAL;
                        goto finish;
                }
                blkcipher_done_slow(walk, n);
        }

        scatterwalk_done(&walk->in, 0, more);
        scatterwalk_done(&walk->out, 1, more);

        if (more) {
                crypto_yield(desc->flags);
                return blkcipher_walk_next(desc, walk);
        }
        err = 0;
finish:
        walk->nbytes = 0;
        if (walk->iv != desc->info)
                memcpy(desc->info, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);
        return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

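/*
 * Slow path: fewer than bsize contiguous bytes are available in the
 * scatterlists, so gather one full block into an aligned bounce buffer.
 * Both src and dst then point at non-straddling spots inside that buffer,
 * and blkcipher_done_slow() scatters the result back out afterwards.
 */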
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk,
                                      unsigned int bsize,
                                      unsigned int alignmask)
{
        unsigned int n;
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

        if (walk->buffer)
                goto ok;

        walk->buffer = walk->page;
        if (walk->buffer)
                goto ok;

        n = aligned_bsize * 3 - (alignmask + 1) +
            (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
        walk->buffer = kmalloc(n, GFP_ATOMIC);
        if (!walk->buffer)
                return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
        walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
                                          alignmask + 1);
        walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
                                                 aligned_bsize, bsize);

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= BLKCIPHER_WALK_SLOW;

        return 0;
}

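/*
 * Copy path: the buffers are misaligned but a whole chunk is contiguous.
 * Stage the source data in the preallocated page so the cipher works on
 * aligned memory; blkcipher_done_fast() copies the result back afterwards.
 */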
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
        u8 *tmp = walk->page;

        blkcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        blkcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        return 0;
}

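/*
 * Fast path: both buffers are suitably aligned, so the cipher can work on
 * them directly.  For a physical walk only page/offset pairs are recorded;
 * otherwise the source is mapped, and the destination is mapped separately
 * only when it is not the same memory as the source.
 */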
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & BLKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        blkcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= BLKCIPHER_WALK_DIFF;
                blkcipher_map_dst(walk);
        }

        return 0;
}

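/*
 * Set up the next chunk of the walk: check that at least one cipher block
 * remains, then pick the slow, copy or fast path depending on alignment
 * and on how many contiguous bytes the scatterlists can supply.
 */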
static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk)
{
        unsigned int bsize;
        unsigned int n;
        int err;

        n = walk->total;
        if (unlikely(n < walk->cipher_blocksize)) {
                desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return blkcipher_walk_done(desc, walk, -EINVAL);
        }

        bsize = min(walk->walk_blocksize, n);

        walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
                         BLKCIPHER_WALK_DIFF);
        if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
            !scatterwalk_aligned(&walk->out, walk->alignmask)) {
                walk->flags |= BLKCIPHER_WALK_COPY;
                if (!walk->page) {
                        walk->page = (void *)__get_free_page(GFP_ATOMIC);
                        if (!walk->page)
                                n = 0;
                }
        }

        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                err = blkcipher_next_slow(desc, walk, bsize, walk->alignmask);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                err = blkcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
        if (walk->flags & BLKCIPHER_WALK_PHYS) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }
        return err;
}

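/*
 * The caller's IV does not satisfy the cipher's alignment mask, so copy it
 * into an aligned buffer.  The buffer is sized to leave room for the two
 * block-sized spots that blkcipher_next_slow() may carve out of it as well.
 */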
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk)
{
        unsigned bs = walk->walk_blocksize;
        unsigned aligned_bs = ALIGN(bs, walk->alignmask + 1);
        unsigned int size = aligned_bs * 2 +
                            walk->ivsize + max(aligned_bs, walk->ivsize) -
                            (walk->alignmask + 1);
        u8 *iv;

        size += walk->alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, walk->ivsize);

        walk->iv = memcpy(iv, walk->iv, walk->ivsize);
        return 0;
}

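/*
 * Start a walk that hands out kernel virtual addresses.  A typical caller
 * loops roughly as follows (an illustrative sketch modelled on the classic
 * CBC-style users of this API; encrypt_blocks() stands in for the actual
 * per-mode block processing and is hypothetical):
 *
 *      struct blkcipher_walk walk;
 *      unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
 *      int err;
 *
 *      blkcipher_walk_init(&walk, dst, src, nbytes);
 *      err = blkcipher_walk_virt(desc, &walk);
 *      while ((nbytes = walk.nbytes)) {
 *              encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr,
 *                             nbytes - (nbytes % bsize), walk.iv);
 *              err = blkcipher_walk_done(desc, &walk, nbytes % bsize);
 *      }
 *      return err;
 */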
int blkcipher_walk_virt(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
        walk->cipher_blocksize = walk->walk_blocksize;
        walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
        walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

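/*
 * As blkcipher_walk_virt(), but the walk hands out page/offset pairs in
 * walk->src.phys and walk->dst.phys instead of mapped virtual addresses.
 */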
int blkcipher_walk_phys(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags |= BLKCIPHER_WALK_PHYS;
        walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
        walk->cipher_blocksize = walk->walk_blocksize;
        walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
        walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

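/*
 * Common setup for all walk variants: record the IV (realigning it first
 * if necessary), start both scatterlist walks and prepare the first chunk.
 * Walking is not allowed from hardirq context, hence the in_irq() check.
 */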
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk)
{
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->iv = desc->info;
        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
                int err = blkcipher_copy_iv(walk);
                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);
        walk->page = NULL;

        return blkcipher_walk_next(desc, walk);
}

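/*
 * Like blkcipher_walk_virt(), but walk in chunks of a caller-supplied
 * blocksize rather than the cipher's own, e.g. for modes that want to
 * process several cipher blocks per step.
 */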
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
                              struct blkcipher_walk *walk,
                              unsigned int blocksize)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->walk_blocksize = blocksize;
        walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
        walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
        walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
                                   struct blkcipher_walk *walk,
                                   struct crypto_aead *tfm,
                                   unsigned int blocksize)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->walk_blocksize = blocksize;
        walk->cipher_blocksize = crypto_aead_blocksize(tfm);
        walk->ivsize = crypto_aead_ivsize(tfm);
        walk->alignmask = crypto_aead_alignmask(tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_aead_walk_virt_block);

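/*
 * The caller's key buffer does not satisfy the algorithm's alignment mask:
 * copy the key into an aligned temporary buffer, set the key from there,
 * then wipe and free the copy.
 */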
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

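/*
 * Validate the key length against the algorithm's declared bounds, then
 * dispatch to the algorithm's setkey, detouring through the bounce buffer
 * above if the key is misaligned.
 */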
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

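/*
 * Adapters that expose a synchronous blkcipher through the asynchronous
 * ablkcipher interface.  The requests still complete synchronously; the
 * wrappers merely translate between the two calling conventions.
 */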
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

        return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

        return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

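/*
 * Context size for a blkcipher transform.  When the synchronous interface
 * is requested (the type mask selects the exact algorithm type) and the
 * cipher uses an IV, room for the IV is reserved after the aligned context;
 * crypto_init_blkcipher_ops_sync() later points crt->iv at that space.
 */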
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                             u32 mask)
{
        struct blkcipher_alg *cipher = &alg->cra_blkcipher;
        unsigned int len = alg->cra_ctxsize;

        if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
            cipher->ivsize) {
                len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
                len += cipher->ivsize;
        }

        return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        crt->setkey = async_setkey;
        crt->encrypt = async_encrypt;
        crt->decrypt = async_decrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

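/*
 * Synchronous interface setup: the IV lives in the extra space that
 * crypto_blkcipher_ctxsize() reserved directly behind the aligned
 * transform context.
 */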
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
        struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
        unsigned long addr;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;

        addr = (unsigned long)crypto_tfm_ctx(tfm);
        addr = ALIGN(addr, align);
        addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
        crt->iv = (void *)addr;

        return 0;
}

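/*
 * Initialise the transform with either the synchronous or the asynchronous
 * operations, depending on whether the user asked for the exact blkcipher
 * type (mask selects all type bits) or is happy with an ablkcipher view.
 */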
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
                return crypto_init_blkcipher_ops_sync(tfm);
        else
                return crypto_init_blkcipher_ops_async(tfm);
}

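/*
 * Report the algorithm's parameters to userspace through the crypto_user
 * netlink interface; without CONFIG_NET there is nothing to report to.
 */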
#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        memset(&rblkcipher, 0, sizeof(rblkcipher));

        strscpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
        strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

        return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

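/* Describe the algorithm in /proc/crypto. */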
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : blkcipher\n");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
        seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
        seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
        seq_printf(m, "geniv        : <default>\n");
}

const struct crypto_type crypto_blkcipher_type = {
        .ctxsize = crypto_blkcipher_ctxsize,
        .init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_blkcipher_show,
#endif
        .report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");
