/* LRW: as defined by Cyril Guyot in
 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */

#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/lrw.h>

struct priv {
        struct crypto_cipher *child;
        struct lrw_table_ctx table;
};

static inline void setbit128_bbe(void *b, int bit)
{
        __set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
                         BITS_PER_LONG
#else
                         BITS_PER_BYTE
#endif
                        ), b);
}

int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
{
        be128 tmp = { 0 };
        int i;

        if (ctx->table)
                gf128mul_free_64k(ctx->table);

        /* initialize multiplication table for Key2 */
        ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
        if (!ctx->table)
                return -ENOMEM;

        /* initialize optimization table */
        for (i = 0; i < 128; i++) {
                setbit128_bbe(&tmp, i);
                ctx->mulinc[i] = tmp;
                gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(lrw_init_table);

void lrw_free_table(struct lrw_table_ctx *ctx)
{
        if (ctx->table)
                gf128mul_free_64k(ctx->table);
}
EXPORT_SYMBOL_GPL(lrw_free_table);

static int setkey(struct crypto_tfm *parent, const u8 *key,
                  unsigned int keylen)
{
        struct priv *ctx = crypto_tfm_ctx(parent);
        struct crypto_cipher *child = ctx->child;
        int err, bsize = LRW_BLOCK_SIZE;
        const u8 *tweak = key + keylen - bsize;

        crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
                                       CRYPTO_TFM_REQ_MASK);
        err = crypto_cipher_setkey(child, key, keylen - bsize);
        if (err)
                return err;
        crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
                                     CRYPTO_TFM_RES_MASK);

        return lrw_init_table(&ctx->table, tweak);
}

struct sinfo {
        be128 t;
        struct crypto_tfm *tfm;
        void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
};

static inline void inc(be128 *iv)
{
        be64_add_cpu(&iv->b, 1);
        if (!iv->b)
                be64_add_cpu(&iv->a, 1);
}

static inline void lrw_round(struct sinfo *s, void *dst, const void *src)
{
        be128_xor(dst, &s->t, src);             /* PP <- T xor P */
        s->fn(s->tfm, dst, dst);                /* CC <- E(Key1,PP) */
        be128_xor(dst, dst, &s->t);             /* C <- T xor CC */
}

/* this returns the number of consecutive 1 bits starting
 * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
        int x;
        __be32 *p = (__be32 *) block;

        for (p += 3, x = 0; x < 128; p--, x += 32) {
                u32 val = be32_to_cpup(p);

                if (!~val)
                        continue;

                return x + ffz(val);
        }

        return x;
}
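
/*
 * Explanatory note (not from the LRW document): mulinc[k], filled in by
 * lrw_init_table() above, holds Key2 multiplied by the 128-bit value whose
 * bits 0..k are all set, because tmp accumulates one more bit on every loop
 * iteration.  If the block index I ends in exactly k one bits, incrementing
 * it flips precisely bits 0..k, and since multiplication in GF(2^128)
 * distributes over XOR, the next tweak follows from a single XOR:
 * T(I + 1) = T(I) xor mulinc[k].  get_index128() supplies k; in the example
 * in the comment above, the low byte is 0xfb = 11111011b, whose two lowest
 * bits are set and whose bit 2 is clear, hence the result 2.
 */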

static int crypt(struct blkcipher_desc *d,
                 struct blkcipher_walk *w, struct priv *ctx,
                 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
        int err;
        unsigned int avail;
        const int bs = LRW_BLOCK_SIZE;
        struct sinfo s = {
                .tfm = crypto_cipher_tfm(ctx->child),
                .fn = fn
        };
        be128 *iv;
        u8 *wsrc;
        u8 *wdst;

        err = blkcipher_walk_virt(d, w);
        if (!(avail = w->nbytes))
                return err;

        wsrc = w->src.virt.addr;
        wdst = w->dst.virt.addr;

        /* calculate first value of T */
        iv = (be128 *)w->iv;
        s.t = *iv;

        /* T <- I*Key2 */
        gf128mul_64k_bbe(&s.t, ctx->table.table);

        goto first;

        for (;;) {
                do {
                        /* T <- I*Key2, using the optimization
                         * discussed in the specification */
                        be128_xor(&s.t, &s.t,
                                  &ctx->table.mulinc[get_index128(iv)]);
                        inc(iv);

first:
                        lrw_round(&s, wdst, wsrc);

                        wsrc += bs;
                        wdst += bs;
                } while ((avail -= bs) >= bs);

                err = blkcipher_walk_done(d, w, avail);
                if (!(avail = w->nbytes))
                        break;

                wsrc = w->src.virt.addr;
                wdst = w->dst.virt.addr;
        }

        return err;
}

static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                   struct scatterlist *src, unsigned int nbytes)
{
        struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk w;

        blkcipher_walk_init(&w, dst, src, nbytes);
        return crypt(desc, &w, ctx,
                     crypto_cipher_alg(ctx->child)->cia_encrypt);
}

static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                   struct scatterlist *src, unsigned int nbytes)
{
        struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk w;

        blkcipher_walk_init(&w, dst, src, nbytes);
        return crypt(desc, &w, ctx,
                     crypto_cipher_alg(ctx->child)->cia_decrypt);
}
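
/*
 * Bulk variant used by implementations that can process several blocks per
 * call: the caller provides, via struct lrw_crypt_req, a tweak buffer
 * (tbuf/tbuflen) and a crypt_fn that en/decrypts nblocks * LRW_BLOCK_SIZE
 * bytes at once.  lrw_crypt() precomputes up to tbuflen / LRW_BLOCK_SIZE
 * tweak values per chunk, XORs them into the data before and after
 * crypt_fn, and walks the scatterlists chunk by chunk.
 */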
int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
              struct scatterlist *ssrc, unsigned int nbytes,
              struct lrw_crypt_req *req)
{
        const unsigned int bsize = LRW_BLOCK_SIZE;
        const unsigned int max_blks = req->tbuflen / bsize;
        struct lrw_table_ctx *ctx = req->table_ctx;
        struct blkcipher_walk walk;
        unsigned int nblocks;
        be128 *iv, *src, *dst, *t;
        be128 *t_buf = req->tbuf;
        int err, i;

        BUG_ON(max_blks < 1);

        blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

        err = blkcipher_walk_virt(desc, &walk);
        nbytes = walk.nbytes;
        if (!nbytes)
                return err;

        nblocks = min(walk.nbytes / bsize, max_blks);
        src = (be128 *)walk.src.virt.addr;
        dst = (be128 *)walk.dst.virt.addr;

        /* calculate first value of T */
        iv = (be128 *)walk.iv;
        t_buf[0] = *iv;

        /* T <- I*Key2 */
        gf128mul_64k_bbe(&t_buf[0], ctx->table);

        i = 0;
        goto first;

        for (;;) {
                do {
                        for (i = 0; i < nblocks; i++) {
                                /* T <- I*Key2, using the optimization
                                 * discussed in the specification */
                                be128_xor(&t_buf[i], t,
                                          &ctx->mulinc[get_index128(iv)]);
                                inc(iv);
first:
                                t = &t_buf[i];

                                /* PP <- T xor P */
                                be128_xor(dst + i, t, src + i);
                        }

                        /* CC <- E(Key1,PP) */
                        req->crypt_fn(req->crypt_ctx, (u8 *)dst,
                                      nblocks * bsize);

                        /* C <- T xor CC */
                        for (i = 0; i < nblocks; i++)
                                be128_xor(dst + i, dst + i, &t_buf[i]);

                        src += nblocks;
                        dst += nblocks;
                        nbytes -= nblocks * bsize;
                        nblocks = min(nbytes / bsize, max_blks);
                } while (nblocks > 0);

                err = blkcipher_walk_done(desc, &walk, nbytes);
                nbytes = walk.nbytes;
                if (!nbytes)
                        break;

                nblocks = min(nbytes / bsize, max_blks);
                src = (be128 *)walk.src.virt.addr;
                dst = (be128 *)walk.dst.virt.addr;
        }

        return err;
}
EXPORT_SYMBOL_GPL(lrw_crypt);

static int init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_cipher *cipher;
        struct crypto_instance *inst = (void *)tfm->__crt_alg;
        struct crypto_spawn *spawn = crypto_instance_ctx(inst);
        struct priv *ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;

        cipher = crypto_spawn_cipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) {
                *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                crypto_free_cipher(cipher);
                return -EINVAL;
        }

        ctx->child = cipher;
        return 0;
}

static void exit_tfm(struct crypto_tfm *tfm)
{
        struct priv *ctx = crypto_tfm_ctx(tfm);

        lrw_free_table(&ctx->table);
        crypto_free_cipher(ctx->child);
}

static struct crypto_instance *alloc(struct rtattr **tb)
{
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        int err;

        err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
        if (err)
                return ERR_PTR(err);

        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(alg))
                return ERR_CAST(alg);

        inst = crypto_alloc_instance("lrw", alg);
        if (IS_ERR(inst))
                goto out_put_alg;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
        inst->alg.cra_priority = alg->cra_priority;
        inst->alg.cra_blocksize = alg->cra_blocksize;

        if (alg->cra_alignmask < 7)
                inst->alg.cra_alignmask = 7;
        else
                inst->alg.cra_alignmask = alg->cra_alignmask;
        inst->alg.cra_type = &crypto_blkcipher_type;

        if (!(alg->cra_blocksize % 4))
                inst->alg.cra_alignmask |= 3;
        inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
        inst->alg.cra_blkcipher.min_keysize =
                alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
        inst->alg.cra_blkcipher.max_keysize =
                alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;

        inst->alg.cra_ctxsize = sizeof(struct priv);

        inst->alg.cra_init = init_tfm;
        inst->alg.cra_exit = exit_tfm;

        inst->alg.cra_blkcipher.setkey = setkey;
        inst->alg.cra_blkcipher.encrypt = encrypt;
        inst->alg.cra_blkcipher.decrypt = decrypt;

out_put_alg:
        crypto_mod_put(alg);
        return inst;
}

static void free(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(inst);
}

static struct crypto_template crypto_tmpl = {
        .name = "lrw",
        .alloc = alloc,
        .free = free,
        .module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
        return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
        crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");
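
/*
 * Minimal usage sketch (illustrative only, hence kept under #if 0): how a
 * caller might drive the "lrw(aes)" instance built by this template through
 * the blkcipher API.  The 32-byte AES key size and the helper name are
 * assumptions made for the example; the last LRW_BLOCK_SIZE bytes of the
 * key material are consumed by setkey() above as the tweak key (Key2), and
 * len must be a multiple of LRW_BLOCK_SIZE.
 */
#if 0
static int lrw_aes_encrypt_example(void *buf, unsigned int len,
                                   const u8 key[32 + LRW_BLOCK_SIZE],
                                   const u8 iv[LRW_BLOCK_SIZE])
{
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct scatterlist sg;
        int err;

        tfm = crypto_alloc_blkcipher("lrw(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* cipher key (Key1, here AES-256) followed by the 16-byte Key2 */
        err = crypto_blkcipher_setkey(tfm, key, 32 + LRW_BLOCK_SIZE);
        if (err)
                goto out;

        /* the IV carries the index I of the first block */
        crypto_blkcipher_set_iv(tfm, iv, LRW_BLOCK_SIZE);

        desc.tfm = tfm;
        desc.flags = 0;

        /* encrypt in place */
        sg_init_one(&sg, buf, len);
        err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
out:
        crypto_free_blkcipher(tfm);
        return err;
}
#endif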