1/* 2 * Intel IXP4xx NPE-C crypto driver 3 * 4 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of version 2 of the GNU General Public License 8 * as published by the Free Software Foundation. 9 * 10 */ 11 12#include <linux/platform_device.h> 13#include <linux/dma-mapping.h> 14#include <linux/dmapool.h> 15#include <linux/crypto.h> 16#include <linux/kernel.h> 17#include <linux/rtnetlink.h> 18#include <linux/interrupt.h> 19#include <linux/spinlock.h> 20#include <linux/gfp.h> 21#include <linux/module.h> 22 23#include <crypto/ctr.h> 24#include <crypto/des.h> 25#include <crypto/aes.h> 26#include <crypto/sha.h> 27#include <crypto/algapi.h> 28#include <crypto/aead.h> 29#include <crypto/authenc.h> 30#include <crypto/scatterwalk.h> 31 32#include <mach/npe.h> 33#include <mach/qmgr.h> 34 35#define MAX_KEYLEN 32 36 37/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */ 38#define NPE_CTX_LEN 80 39#define AES_BLOCK128 16 40 41#define NPE_OP_HASH_VERIFY 0x01 42#define NPE_OP_CCM_ENABLE 0x04 43#define NPE_OP_CRYPT_ENABLE 0x08 44#define NPE_OP_HASH_ENABLE 0x10 45#define NPE_OP_NOT_IN_PLACE 0x20 46#define NPE_OP_HMAC_DISABLE 0x40 47#define NPE_OP_CRYPT_ENCRYPT 0x80 48 49#define NPE_OP_CCM_GEN_MIC 0xcc 50#define NPE_OP_HASH_GEN_ICV 0x50 51#define NPE_OP_ENC_GEN_KEY 0xc9 52 53#define MOD_ECB 0x0000 54#define MOD_CTR 0x1000 55#define MOD_CBC_ENC 0x2000 56#define MOD_CBC_DEC 0x3000 57#define MOD_CCM_ENC 0x4000 58#define MOD_CCM_DEC 0x5000 59 60#define KEYLEN_128 4 61#define KEYLEN_192 6 62#define KEYLEN_256 8 63 64#define CIPH_DECR 0x0000 65#define CIPH_ENCR 0x0400 66 67#define MOD_DES 0x0000 68#define MOD_TDEA2 0x0100 69#define MOD_3DES 0x0200 70#define MOD_AES 0x0800 71#define MOD_AES128 (0x0800 | KEYLEN_128) 72#define MOD_AES192 (0x0900 | KEYLEN_192) 73#define MOD_AES256 (0x0a00 | KEYLEN_256) 74 75#define MAX_IVLEN 16 76#define NPE_ID 2 /* NPE C */ 77#define NPE_QLEN 16 78/* Space for registering when the first 79 * NPE_QLEN crypt_ctl are busy */ 80#define NPE_QLEN_TOTAL 64 81 82#define SEND_QID 29 83#define RECV_QID 30 84 85#define CTL_FLAG_UNUSED 0x0000 86#define CTL_FLAG_USED 0x1000 87#define CTL_FLAG_PERFORM_ABLK 0x0001 88#define CTL_FLAG_GEN_ICV 0x0002 89#define CTL_FLAG_GEN_REVAES 0x0004 90#define CTL_FLAG_PERFORM_AEAD 0x0008 91#define CTL_FLAG_MASK 0x000f 92 93#define HMAC_IPAD_VALUE 0x36 94#define HMAC_OPAD_VALUE 0x5C 95#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE 96 97#define MD5_DIGEST_SIZE 16 98 99struct buffer_desc { 100 u32 phys_next; 101#ifdef __ARMEB__ 102 u16 buf_len; 103 u16 pkt_len; 104#else 105 u16 pkt_len; 106 u16 buf_len; 107#endif 108 u32 phys_addr; 109 u32 __reserved[4]; 110 struct buffer_desc *next; 111 enum dma_data_direction dir; 112}; 113 114struct crypt_ctl { 115#ifdef __ARMEB__ 116 u8 mode; /* NPE_OP_* operation mode */ 117 u8 init_len; 118 u16 reserved; 119#else 120 u16 reserved; 121 u8 init_len; 122 u8 mode; /* NPE_OP_* operation mode */ 123#endif 124 u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */ 125 u32 icv_rev_aes; /* icv or rev aes */ 126 u32 src_buf; 127 u32 dst_buf; 128#ifdef __ARMEB__ 129 u16 auth_offs; /* Authentication start offset */ 130 u16 auth_len; /* Authentication data length */ 131 u16 crypt_offs; /* Cryption start offset */ 132 u16 crypt_len; /* Cryption data length */ 133#else 134 u16 auth_len; /* Authentication data length */ 135 u16 auth_offs; /* Authentication start offset */ 136 u16 crypt_len; /* Cryption data length */ 137 u16 crypt_offs; /* Cryption start offset */ 138#endif 139 u32 aadAddr; /* Additional Auth Data Addr for CCM mode */ 140 u32 crypto_ctx; /* NPE Crypto Param structure address */ 141 142 /* Used by Host: 4*4 bytes*/ 143 unsigned ctl_flags; 144 union { 145 struct ablkcipher_request *ablk_req; 146 struct aead_request *aead_req; 147 struct crypto_tfm *tfm; 148 } data; 149 struct buffer_desc *regist_buf; 150 u8 *regist_ptr; 151}; 152 153struct ablk_ctx { 154 struct buffer_desc *src; 155 struct buffer_desc *dst; 156}; 157 158struct aead_ctx { 159 struct buffer_desc *buffer; 160 struct scatterlist ivlist; 161 /* used when the hmac is not on one sg entry */ 162 u8 *hmac_virt; 163 int encrypt; 164}; 165 166struct ix_hash_algo { 167 u32 cfgword; 168 unsigned char *icv; 169}; 170 171struct ix_sa_dir { 172 unsigned char *npe_ctx; 173 dma_addr_t npe_ctx_phys; 174 int npe_ctx_idx; 175 u8 npe_mode; 176}; 177 178struct ixp_ctx { 179 struct ix_sa_dir encrypt; 180 struct ix_sa_dir decrypt; 181 int authkey_len; 182 u8 authkey[MAX_KEYLEN]; 183 int enckey_len; 184 u8 enckey[MAX_KEYLEN]; 185 u8 salt[MAX_IVLEN]; 186 u8 nonce[CTR_RFC3686_NONCE_SIZE]; 187 unsigned salted; 188 atomic_t configuring; 189 struct completion completion; 190}; 191 192struct ixp_alg { 193 struct crypto_alg crypto; 194 const struct ix_hash_algo *hash; 195 u32 cfg_enc; 196 u32 cfg_dec; 197 198 int registered; 199}; 200 201static const struct ix_hash_algo hash_alg_md5 = { 202 .cfgword = 0xAA010004, 203 .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF" 204 "\xFE\xDC\xBA\x98\x76\x54\x32\x10", 205}; 206static const struct ix_hash_algo hash_alg_sha1 = { 207 .cfgword = 0x00000005, 208 .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA" 209 "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0", 210}; 211 212static struct npe *npe_c; 213static struct dma_pool *buffer_pool = NULL; 214static struct dma_pool *ctx_pool = NULL; 215 216static struct crypt_ctl *crypt_virt = NULL; 217static dma_addr_t crypt_phys; 218 219static int support_aes = 1; 220 221#define DRIVER_NAME "ixp4xx_crypto" 222 223static struct platform_device *pdev; 224 225static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt) 226{ 227 return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl); 228} 229 230static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys) 231{ 232 return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl); 233} 234 235static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm) 236{ 237 return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc; 238} 239 240static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm) 241{ 242 return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec; 243} 244 245static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm) 246{ 247 return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash; 248} 249 250static int setup_crypt_desc(void) 251{ 252 struct device *dev = &pdev->dev; 253 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); 254 crypt_virt = dma_alloc_coherent(dev, 255 NPE_QLEN * sizeof(struct crypt_ctl), 256 &crypt_phys, GFP_ATOMIC); 257 if (!crypt_virt) 258 return -ENOMEM; 259 memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl)); 260 return 0; 261} 262 263static spinlock_t desc_lock; 264static struct crypt_ctl *get_crypt_desc(void) 265{ 266 int i; 267 static int idx = 0; 268 unsigned long flags; 269 270 spin_lock_irqsave(&desc_lock, flags); 271 272 if (unlikely(!crypt_virt)) 273 setup_crypt_desc(); 274 if (unlikely(!crypt_virt)) { 275 spin_unlock_irqrestore(&desc_lock, flags); 276 return NULL; 277 } 278 i = idx; 279 if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) { 280 if (++idx >= NPE_QLEN) 281 idx = 0; 282 crypt_virt[i].ctl_flags = CTL_FLAG_USED; 283 spin_unlock_irqrestore(&desc_lock, flags); 284 return crypt_virt +i; 285 } else { 286 spin_unlock_irqrestore(&desc_lock, flags); 287 return NULL; 288 } 289} 290 291static spinlock_t emerg_lock; 292static struct crypt_ctl *get_crypt_desc_emerg(void) 293{ 294 int i; 295 static int idx = NPE_QLEN; 296 struct crypt_ctl *desc; 297 unsigned long flags; 298 299 desc = get_crypt_desc(); 300 if (desc) 301 return desc; 302 if (unlikely(!crypt_virt)) 303 return NULL; 304 305 spin_lock_irqsave(&emerg_lock, flags); 306 i = idx; 307 if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) { 308 if (++idx >= NPE_QLEN_TOTAL) 309 idx = NPE_QLEN; 310 crypt_virt[i].ctl_flags = CTL_FLAG_USED; 311 spin_unlock_irqrestore(&emerg_lock, flags); 312 return crypt_virt +i; 313 } else { 314 spin_unlock_irqrestore(&emerg_lock, flags); 315 return NULL; 316 } 317} 318 319static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys) 320{ 321 while (buf) { 322 struct buffer_desc *buf1; 323 u32 phys1; 324 325 buf1 = buf->next; 326 phys1 = buf->phys_next; 327 dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir); 328 dma_pool_free(buffer_pool, buf, phys); 329 buf = buf1; 330 phys = phys1; 331 } 332} 333 334static struct tasklet_struct crypto_done_tasklet; 335 336static void finish_scattered_hmac(struct crypt_ctl *crypt) 337{ 338 struct aead_request *req = crypt->data.aead_req; 339 struct aead_ctx *req_ctx = aead_request_ctx(req); 340 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 341 int authsize = crypto_aead_authsize(tfm); 342 int decryptlen = req->cryptlen - authsize; 343 344 if (req_ctx->encrypt) { 345 scatterwalk_map_and_copy(req_ctx->hmac_virt, 346 req->src, decryptlen, authsize, 1); 347 } 348 dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes); 349} 350 351static void one_packet(dma_addr_t phys) 352{ 353 struct device *dev = &pdev->dev; 354 struct crypt_ctl *crypt; 355 struct ixp_ctx *ctx; 356 int failed; 357 358 failed = phys & 0x1 ? -EBADMSG : 0; 359 phys &= ~0x3; 360 crypt = crypt_phys2virt(phys); 361 362 switch (crypt->ctl_flags & CTL_FLAG_MASK) { 363 case CTL_FLAG_PERFORM_AEAD: { 364 struct aead_request *req = crypt->data.aead_req; 365 struct aead_ctx *req_ctx = aead_request_ctx(req); 366 367 free_buf_chain(dev, req_ctx->buffer, crypt->src_buf); 368 if (req_ctx->hmac_virt) { 369 finish_scattered_hmac(crypt); 370 } 371 req->base.complete(&req->base, failed); 372 break; 373 } 374 case CTL_FLAG_PERFORM_ABLK: { 375 struct ablkcipher_request *req = crypt->data.ablk_req; 376 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req); 377 378 if (req_ctx->dst) { 379 free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); 380 } 381 free_buf_chain(dev, req_ctx->src, crypt->src_buf); 382 req->base.complete(&req->base, failed); 383 break; 384 } 385 case CTL_FLAG_GEN_ICV: 386 ctx = crypto_tfm_ctx(crypt->data.tfm); 387 dma_pool_free(ctx_pool, crypt->regist_ptr, 388 crypt->regist_buf->phys_addr); 389 dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf); 390 if (atomic_dec_and_test(&ctx->configuring)) 391 complete(&ctx->completion); 392 break; 393 case CTL_FLAG_GEN_REVAES: 394 ctx = crypto_tfm_ctx(crypt->data.tfm); 395 *(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR); 396 if (atomic_dec_and_test(&ctx->configuring)) 397 complete(&ctx->completion); 398 break; 399 default: 400 BUG(); 401 } 402 crypt->ctl_flags = CTL_FLAG_UNUSED; 403} 404 405static void irqhandler(void *_unused) 406{ 407 tasklet_schedule(&crypto_done_tasklet); 408} 409 410static void crypto_done_action(unsigned long arg) 411{ 412 int i; 413 414 for(i=0; i<4; i++) { 415 dma_addr_t phys = qmgr_get_entry(RECV_QID); 416 if (!phys) 417 return; 418 one_packet(phys); 419 } 420 tasklet_schedule(&crypto_done_tasklet); 421} 422 423static int init_ixp_crypto(struct device *dev) 424{ 425 int ret = -ENODEV; 426 u32 msg[2] = { 0, 0 }; 427 428 if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH | 429 IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) { 430 printk(KERN_ERR "ixp_crypto: No HW crypto available\n"); 431 return ret; 432 } 433 npe_c = npe_request(NPE_ID); 434 if (!npe_c) 435 return ret; 436 437 if (!npe_running(npe_c)) { 438 ret = npe_load_firmware(npe_c, npe_name(npe_c), dev); 439 if (ret) { 440 return ret; 441 } 442 if (npe_recv_message(npe_c, msg, "STATUS_MSG")) 443 goto npe_error; 444 } else { 445 if (npe_send_message(npe_c, msg, "STATUS_MSG")) 446 goto npe_error; 447 448 if (npe_recv_message(npe_c, msg, "STATUS_MSG")) 449 goto npe_error; 450 } 451 452 switch ((msg[1]>>16) & 0xff) { 453 case 3: 454 printk(KERN_WARNING "Firmware of %s lacks AES support\n", 455 npe_name(npe_c)); 456 support_aes = 0; 457 break; 458 case 4: 459 case 5: 460 support_aes = 1; 461 break; 462 default: 463 printk(KERN_ERR "Firmware of %s lacks crypto support\n", 464 npe_name(npe_c)); 465 return -ENODEV; 466 } 467 /* buffer_pool will also be used to sometimes store the hmac, 468 * so assure it is large enough 469 */ 470 BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc)); 471 buffer_pool = dma_pool_create("buffer", dev, 472 sizeof(struct buffer_desc), 32, 0); 473 ret = -ENOMEM; 474 if (!buffer_pool) { 475 goto err; 476 } 477 ctx_pool = dma_pool_create("context", dev, 478 NPE_CTX_LEN, 16, 0); 479 if (!ctx_pool) { 480 goto err; 481 } 482 ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0, 483 "ixp_crypto:out", NULL); 484 if (ret) 485 goto err; 486 ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0, 487 "ixp_crypto:in", NULL); 488 if (ret) { 489 qmgr_release_queue(SEND_QID); 490 goto err; 491 } 492 qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL); 493 tasklet_init(&crypto_done_tasklet, crypto_done_action, 0); 494 495 qmgr_enable_irq(RECV_QID); 496 return 0; 497 498npe_error: 499 printk(KERN_ERR "%s not responding\n", npe_name(npe_c)); 500 ret = -EIO; 501err: 502 if (ctx_pool) 503 dma_pool_destroy(ctx_pool); 504 if (buffer_pool) 505 dma_pool_destroy(buffer_pool); 506 npe_release(npe_c); 507 return ret; 508} 509 510static void release_ixp_crypto(struct device *dev) 511{ 512 qmgr_disable_irq(RECV_QID); 513 tasklet_kill(&crypto_done_tasklet); 514 515 qmgr_release_queue(SEND_QID); 516 qmgr_release_queue(RECV_QID); 517 518 dma_pool_destroy(ctx_pool); 519 dma_pool_destroy(buffer_pool); 520 521 npe_release(npe_c); 522 523 if (crypt_virt) { 524 dma_free_coherent(dev, 525 NPE_QLEN_TOTAL * sizeof( struct crypt_ctl), 526 crypt_virt, crypt_phys); 527 } 528 return; 529} 530 531static void reset_sa_dir(struct ix_sa_dir *dir) 532{ 533 memset(dir->npe_ctx, 0, NPE_CTX_LEN); 534 dir->npe_ctx_idx = 0; 535 dir->npe_mode = 0; 536} 537 538static int init_sa_dir(struct ix_sa_dir *dir) 539{ 540 dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys); 541 if (!dir->npe_ctx) { 542 return -ENOMEM; 543 } 544 reset_sa_dir(dir); 545 return 0; 546} 547 548static void free_sa_dir(struct ix_sa_dir *dir) 549{ 550 memset(dir->npe_ctx, 0, NPE_CTX_LEN); 551 dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys); 552} 553 554static int init_tfm(struct crypto_tfm *tfm) 555{ 556 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); 557 int ret; 558 559 atomic_set(&ctx->configuring, 0); 560 ret = init_sa_dir(&ctx->encrypt); 561 if (ret) 562 return ret; 563 ret = init_sa_dir(&ctx->decrypt); 564 if (ret) { 565 free_sa_dir(&ctx->encrypt); 566 } 567 return ret; 568} 569 570static int init_tfm_ablk(struct crypto_tfm *tfm) 571{ 572 tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx); 573 return init_tfm(tfm); 574} 575 576static int init_tfm_aead(struct crypto_tfm *tfm) 577{ 578 tfm->crt_aead.reqsize = sizeof(struct aead_ctx); 579 return init_tfm(tfm); 580} 581 582static void exit_tfm(struct crypto_tfm *tfm) 583{ 584 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); 585 free_sa_dir(&ctx->encrypt); 586 free_sa_dir(&ctx->decrypt); 587} 588 589static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target, 590 int init_len, u32 ctx_addr, const u8 *key, int key_len) 591{ 592 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); 593 struct crypt_ctl *crypt; 594 struct buffer_desc *buf; 595 int i; 596 u8 *pad; 597 u32 pad_phys, buf_phys; 598 599 BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN); 600 pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys); 601 if (!pad) 602 return -ENOMEM; 603 buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys); 604 if (!buf) { 605 dma_pool_free(ctx_pool, pad, pad_phys); 606 return -ENOMEM; 607 } 608 crypt = get_crypt_desc_emerg(); 609 if (!crypt) { 610 dma_pool_free(ctx_pool, pad, pad_phys); 611 dma_pool_free(buffer_pool, buf, buf_phys); 612 return -EAGAIN; 613 } 614 615 memcpy(pad, key, key_len); 616 memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len); 617 for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) { 618 pad[i] ^= xpad; 619 } 620 621 crypt->data.tfm = tfm; 622 crypt->regist_ptr = pad; 623 crypt->regist_buf = buf; 624 625 crypt->auth_offs = 0; 626 crypt->auth_len = HMAC_PAD_BLOCKLEN; 627 crypt->crypto_ctx = ctx_addr; 628 crypt->src_buf = buf_phys; 629 crypt->icv_rev_aes = target; 630 crypt->mode = NPE_OP_HASH_GEN_ICV; 631 crypt->init_len = init_len; 632 crypt->ctl_flags |= CTL_FLAG_GEN_ICV; 633 634 buf->next = 0; 635 buf->buf_len = HMAC_PAD_BLOCKLEN; 636 buf->pkt_len = 0; 637 buf->phys_addr = pad_phys; 638 639 atomic_inc(&ctx->configuring); 640 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); 641 BUG_ON(qmgr_stat_overflow(SEND_QID)); 642 return 0; 643} 644 645static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize, 646 const u8 *key, int key_len, unsigned digest_len) 647{ 648 u32 itarget, otarget, npe_ctx_addr; 649 unsigned char *cinfo; 650 int init_len, ret = 0; 651 u32 cfgword; 652 struct ix_sa_dir *dir; 653 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); 654 const struct ix_hash_algo *algo; 655 656 dir = encrypt ? &ctx->encrypt : &ctx->decrypt; 657 cinfo = dir->npe_ctx + dir->npe_ctx_idx; 658 algo = ix_hash(tfm); 659 660 /* write cfg word to cryptinfo */ 661 cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */ 662#ifndef __ARMEB__ 663 cfgword ^= 0xAA000000; /* change the "byte swap" flags */ 664#endif 665 *(u32*)cinfo = cpu_to_be32(cfgword); 666 cinfo += sizeof(cfgword); 667 668 /* write ICV to cryptinfo */ 669 memcpy(cinfo, algo->icv, digest_len); 670 cinfo += digest_len; 671 672 itarget = dir->npe_ctx_phys + dir->npe_ctx_idx 673 + sizeof(algo->cfgword); 674 otarget = itarget + digest_len; 675 init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx); 676 npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx; 677 678 dir->npe_ctx_idx += init_len; 679 dir->npe_mode |= NPE_OP_HASH_ENABLE; 680 681 if (!encrypt) 682 dir->npe_mode |= NPE_OP_HASH_VERIFY; 683 684 ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget, 685 init_len, npe_ctx_addr, key, key_len); 686 if (ret) 687 return ret; 688 return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget, 689 init_len, npe_ctx_addr, key, key_len); 690} 691 692static int gen_rev_aes_key(struct crypto_tfm *tfm) 693{ 694 struct crypt_ctl *crypt; 695 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); 696 struct ix_sa_dir *dir = &ctx->decrypt; 697 698 crypt = get_crypt_desc_emerg(); 699 if (!crypt) { 700 return -EAGAIN; 701 } 702 *(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR); 703 704 crypt->data.tfm = tfm; 705 crypt->crypt_offs = 0; 706 crypt->crypt_len = AES_BLOCK128; 707 crypt->src_buf = 0; 708 crypt->crypto_ctx = dir->npe_ctx_phys; 709 crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32); 710 crypt->mode = NPE_OP_ENC_GEN_KEY; 711 crypt->init_len = dir->npe_ctx_idx; 712 crypt->ctl_flags |= CTL_FLAG_GEN_REVAES; 713 714 atomic_inc(&ctx->configuring); 715 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); 716 BUG_ON(qmgr_stat_overflow(SEND_QID)); 717 return 0; 718} 719 720static int setup_cipher(struct crypto_tfm *tfm, int encrypt, 721 const u8 *key, int key_len) 722{ 723 u8 *cinfo; 724 u32 cipher_cfg; 725 u32 keylen_cfg = 0; 726 struct ix_sa_dir *dir; 727 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm); 728 u32 *flags = &tfm->crt_flags; 729 730 dir = encrypt ? &ctx->encrypt : &ctx->decrypt; 731 cinfo = dir->npe_ctx; 732 733 if (encrypt) { 734 cipher_cfg = cipher_cfg_enc(tfm); 735 dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT; 736 } else { 737 cipher_cfg = cipher_cfg_dec(tfm); 738 } 739 if (cipher_cfg & MOD_AES) { 740 switch (key_len) { 741 case 16: keylen_cfg = MOD_AES128; break; 742 case 24: keylen_cfg = MOD_AES192; break; 743 case 32: keylen_cfg = MOD_AES256; break; 744 default: 745 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 746 return -EINVAL; 747 } 748 cipher_cfg |= keylen_cfg; 749 } else if (cipher_cfg & MOD_3DES) { 750 const u32 *K = (const u32 *)key; 751 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 752 !((K[2] ^ K[4]) | (K[3] ^ K[5])))) 753 { 754 *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; 755 return -EINVAL; 756 } 757 } else { 758 u32 tmp[DES_EXPKEY_WORDS]; 759 if (des_ekey(tmp, key) == 0) { 760 *flags |= CRYPTO_TFM_RES_WEAK_KEY; 761 } 762 } 763 /* write cfg word to cryptinfo */ 764 *(u32*)cinfo = cpu_to_be32(cipher_cfg); 765 cinfo += sizeof(cipher_cfg); 766 767 /* write cipher key to cryptinfo */ 768 memcpy(cinfo, key, key_len); 769 /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */ 770 if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) { 771 memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE -key_len); 772 key_len = DES3_EDE_KEY_SIZE; 773 } 774 dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len; 775 dir->npe_mode |= NPE_OP_CRYPT_ENABLE; 776 if ((cipher_cfg & MOD_AES) && !encrypt) { 777 return gen_rev_aes_key(tfm); 778 } 779 return 0; 780} 781 782static struct buffer_desc *chainup_buffers(struct device *dev, 783 struct scatterlist *sg, unsigned nbytes, 784 struct buffer_desc *buf, gfp_t flags, 785 enum dma_data_direction dir) 786{ 787 for (; nbytes > 0; sg = sg_next(sg)) { 788 unsigned len = min(nbytes, sg->length); 789 struct buffer_desc *next_buf; 790 u32 next_buf_phys; 791 void *ptr; 792 793 nbytes -= len; 794 ptr = page_address(sg_page(sg)) + sg->offset; 795 next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys); 796 if (!next_buf) { 797 buf = NULL; 798 break; 799 } 800 sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir); 801 buf->next = next_buf; 802 buf->phys_next = next_buf_phys; 803 buf = next_buf; 804 805 buf->phys_addr = sg_dma_address(sg); 806 buf->buf_len = len; 807 buf->dir = dir; 808 } 809 buf->next = NULL; 810 buf->phys_next = 0; 811 return buf; 812} 813 814static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 815 unsigned int key_len) 816{ 817 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm); 818 u32 *flags = &tfm->base.crt_flags; 819 int ret; 820 821 init_completion(&ctx->completion); 822 atomic_inc(&ctx->configuring); 823 824 reset_sa_dir(&ctx->encrypt); 825 reset_sa_dir(&ctx->decrypt); 826 827 ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE; 828 ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE; 829 830 ret = setup_cipher(&tfm->base, 0, key, key_len); 831 if (ret) 832 goto out; 833 ret = setup_cipher(&tfm->base, 1, key, key_len); 834 if (ret) 835 goto out; 836 837 if (*flags & CRYPTO_TFM_RES_WEAK_KEY) { 838 if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) { 839 ret = -EINVAL; 840 } else { 841 *flags &= ~CRYPTO_TFM_RES_WEAK_KEY; 842 } 843 } 844out: 845 if (!atomic_dec_and_test(&ctx->configuring)) 846 wait_for_completion(&ctx->completion); 847 return ret; 848} 849 850static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 851 unsigned int key_len) 852{ 853 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm); 854 855 /* the nonce is stored in bytes at end of key */ 856 if (key_len < CTR_RFC3686_NONCE_SIZE) 857 return -EINVAL; 858 859 memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE), 860 CTR_RFC3686_NONCE_SIZE); 861 862 key_len -= CTR_RFC3686_NONCE_SIZE; 863 return ablk_setkey(tfm, key, key_len); 864} 865 866static int ablk_perform(struct ablkcipher_request *req, int encrypt) 867{ 868 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 869 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm); 870 unsigned ivsize = crypto_ablkcipher_ivsize(tfm); 871 struct ix_sa_dir *dir; 872 struct crypt_ctl *crypt; 873 unsigned int nbytes = req->nbytes; 874 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL; 875 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req); 876 struct buffer_desc src_hook; 877 struct device *dev = &pdev->dev; 878 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 879 GFP_KERNEL : GFP_ATOMIC; 880 881 if (qmgr_stat_full(SEND_QID)) 882 return -EAGAIN; 883 if (atomic_read(&ctx->configuring)) 884 return -EAGAIN; 885 886 dir = encrypt ? &ctx->encrypt : &ctx->decrypt; 887 888 crypt = get_crypt_desc(); 889 if (!crypt) 890 return -ENOMEM; 891 892 crypt->data.ablk_req = req; 893 crypt->crypto_ctx = dir->npe_ctx_phys; 894 crypt->mode = dir->npe_mode; 895 crypt->init_len = dir->npe_ctx_idx; 896 897 crypt->crypt_offs = 0; 898 crypt->crypt_len = nbytes; 899 900 BUG_ON(ivsize && !req->info); 901 memcpy(crypt->iv, req->info, ivsize); 902 if (req->src != req->dst) { 903 struct buffer_desc dst_hook; 904 crypt->mode |= NPE_OP_NOT_IN_PLACE; 905 /* This was never tested by Intel 906 * for more than one dst buffer, I think. */ 907 req_ctx->dst = NULL; 908 if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook, 909 flags, DMA_FROM_DEVICE)) 910 goto free_buf_dest; 911 src_direction = DMA_TO_DEVICE; 912 req_ctx->dst = dst_hook.next; 913 crypt->dst_buf = dst_hook.phys_next; 914 } else { 915 req_ctx->dst = NULL; 916 } 917 req_ctx->src = NULL; 918 if (!chainup_buffers(dev, req->src, nbytes, &src_hook, 919 flags, src_direction)) 920 goto free_buf_src; 921 922 req_ctx->src = src_hook.next; 923 crypt->src_buf = src_hook.phys_next; 924 crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK; 925 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); 926 BUG_ON(qmgr_stat_overflow(SEND_QID)); 927 return -EINPROGRESS; 928 929free_buf_src: 930 free_buf_chain(dev, req_ctx->src, crypt->src_buf); 931free_buf_dest: 932 if (req->src != req->dst) { 933 free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); 934 } 935 crypt->ctl_flags = CTL_FLAG_UNUSED; 936 return -ENOMEM; 937} 938 939static int ablk_encrypt(struct ablkcipher_request *req) 940{ 941 return ablk_perform(req, 1); 942} 943 944static int ablk_decrypt(struct ablkcipher_request *req) 945{ 946 return ablk_perform(req, 0); 947} 948 949static int ablk_rfc3686_crypt(struct ablkcipher_request *req) 950{ 951 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 952 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm); 953 u8 iv[CTR_RFC3686_BLOCK_SIZE]; 954 u8 *info = req->info; 955 int ret; 956 957 /* set up counter block */ 958 memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); 959 memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE); 960 961 /* initialize counter portion of counter block */ 962 *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = 963 cpu_to_be32(1); 964 965 req->info = iv; 966 ret = ablk_perform(req, 1); 967 req->info = info; 968 return ret; 969} 970 971static int hmac_inconsistent(struct scatterlist *sg, unsigned start, 972 unsigned int nbytes) 973{ 974 int offset = 0; 975 976 if (!nbytes) 977 return 0; 978 979 for (;;) { 980 if (start < offset + sg->length) 981 break; 982 983 offset += sg->length; 984 sg = sg_next(sg); 985 } 986 return (start + nbytes > offset + sg->length); 987} 988 989static int aead_perform(struct aead_request *req, int encrypt, 990 int cryptoffset, int eff_cryptlen, u8 *iv) 991{ 992 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 993 struct ixp_ctx *ctx = crypto_aead_ctx(tfm); 994 unsigned ivsize = crypto_aead_ivsize(tfm); 995 unsigned authsize = crypto_aead_authsize(tfm); 996 struct ix_sa_dir *dir; 997 struct crypt_ctl *crypt; 998 unsigned int cryptlen; 999 struct buffer_desc *buf, src_hook; 1000 struct aead_ctx *req_ctx = aead_request_ctx(req); 1001 struct device *dev = &pdev->dev; 1002 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 1003 GFP_KERNEL : GFP_ATOMIC; 1004 1005 if (qmgr_stat_full(SEND_QID)) 1006 return -EAGAIN; 1007 if (atomic_read(&ctx->configuring)) 1008 return -EAGAIN; 1009 1010 if (encrypt) { 1011 dir = &ctx->encrypt; 1012 cryptlen = req->cryptlen; 1013 } else { 1014 dir = &ctx->decrypt; 1015 /* req->cryptlen includes the authsize when decrypting */ 1016 cryptlen = req->cryptlen -authsize; 1017 eff_cryptlen -= authsize; 1018 } 1019 crypt = get_crypt_desc(); 1020 if (!crypt) 1021 return -ENOMEM; 1022 1023 crypt->data.aead_req = req; 1024 crypt->crypto_ctx = dir->npe_ctx_phys; 1025 crypt->mode = dir->npe_mode; 1026 crypt->init_len = dir->npe_ctx_idx; 1027 1028 crypt->crypt_offs = cryptoffset; 1029 crypt->crypt_len = eff_cryptlen; 1030 1031 crypt->auth_offs = 0; 1032 crypt->auth_len = req->assoclen + ivsize + cryptlen; 1033 BUG_ON(ivsize && !req->iv); 1034 memcpy(crypt->iv, req->iv, ivsize); 1035 1036 if (req->src != req->dst) { 1037 BUG(); /* -ENOTSUP because of my laziness */ 1038 } 1039 1040 /* ASSOC data */ 1041 buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook, 1042 flags, DMA_TO_DEVICE); 1043 req_ctx->buffer = src_hook.next; 1044 crypt->src_buf = src_hook.phys_next; 1045 if (!buf) 1046 goto out; 1047 /* IV */ 1048 sg_init_table(&req_ctx->ivlist, 1); 1049 sg_set_buf(&req_ctx->ivlist, iv, ivsize); 1050 buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags, 1051 DMA_BIDIRECTIONAL); 1052 if (!buf) 1053 goto free_chain; 1054 if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) { 1055 /* The 12 hmac bytes are scattered, 1056 * we need to copy them into a safe buffer */ 1057 req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, 1058 &crypt->icv_rev_aes); 1059 if (unlikely(!req_ctx->hmac_virt)) 1060 goto free_chain; 1061 if (!encrypt) { 1062 scatterwalk_map_and_copy(req_ctx->hmac_virt, 1063 req->src, cryptlen, authsize, 0); 1064 } 1065 req_ctx->encrypt = encrypt; 1066 } else { 1067 req_ctx->hmac_virt = NULL; 1068 } 1069 /* Crypt */ 1070 buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags, 1071 DMA_BIDIRECTIONAL); 1072 if (!buf) 1073 goto free_hmac_virt; 1074 if (!req_ctx->hmac_virt) { 1075 crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize; 1076 } 1077 1078 crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD; 1079 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); 1080 BUG_ON(qmgr_stat_overflow(SEND_QID)); 1081 return -EINPROGRESS; 1082free_hmac_virt: 1083 if (req_ctx->hmac_virt) { 1084 dma_pool_free(buffer_pool, req_ctx->hmac_virt, 1085 crypt->icv_rev_aes); 1086 } 1087free_chain: 1088 free_buf_chain(dev, req_ctx->buffer, crypt->src_buf); 1089out: 1090 crypt->ctl_flags = CTL_FLAG_UNUSED; 1091 return -ENOMEM; 1092} 1093 1094static int aead_setup(struct crypto_aead *tfm, unsigned int authsize) 1095{ 1096 struct ixp_ctx *ctx = crypto_aead_ctx(tfm); 1097 u32 *flags = &tfm->base.crt_flags; 1098 unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize; 1099 int ret; 1100 1101 if (!ctx->enckey_len && !ctx->authkey_len) 1102 return 0; 1103 init_completion(&ctx->completion); 1104 atomic_inc(&ctx->configuring); 1105 1106 reset_sa_dir(&ctx->encrypt); 1107 reset_sa_dir(&ctx->decrypt); 1108 1109 ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len); 1110 if (ret) 1111 goto out; 1112 ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len); 1113 if (ret) 1114 goto out; 1115 ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey, 1116 ctx->authkey_len, digest_len); 1117 if (ret) 1118 goto out; 1119 ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey, 1120 ctx->authkey_len, digest_len); 1121 if (ret) 1122 goto out; 1123 1124 if (*flags & CRYPTO_TFM_RES_WEAK_KEY) { 1125 if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) { 1126 ret = -EINVAL; 1127 goto out; 1128 } else { 1129 *flags &= ~CRYPTO_TFM_RES_WEAK_KEY; 1130 } 1131 } 1132out: 1133 if (!atomic_dec_and_test(&ctx->configuring)) 1134 wait_for_completion(&ctx->completion); 1135 return ret; 1136} 1137 1138static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) 1139{ 1140 int max = crypto_aead_alg(tfm)->maxauthsize >> 2; 1141 1142 if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3)) 1143 return -EINVAL; 1144 return aead_setup(tfm, authsize); 1145} 1146 1147static int aead_setkey(struct crypto_aead *tfm, const u8 *key, 1148 unsigned int keylen) 1149{ 1150 struct ixp_ctx *ctx = crypto_aead_ctx(tfm); 1151 struct crypto_authenc_keys keys; 1152 1153 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 1154 goto badkey; 1155 1156 if (keys.authkeylen > sizeof(ctx->authkey)) 1157 goto badkey; 1158 1159 if (keys.enckeylen > sizeof(ctx->enckey)) 1160 goto badkey; 1161 1162 memcpy(ctx->authkey, keys.authkey, keys.authkeylen); 1163 memcpy(ctx->enckey, keys.enckey, keys.enckeylen); 1164 ctx->authkey_len = keys.authkeylen; 1165 ctx->enckey_len = keys.enckeylen; 1166 1167 return aead_setup(tfm, crypto_aead_authsize(tfm)); 1168badkey: 1169 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 1170 return -EINVAL; 1171} 1172 1173static int aead_encrypt(struct aead_request *req) 1174{ 1175 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); 1176 return aead_perform(req, 1, req->assoclen + ivsize, 1177 req->cryptlen, req->iv); 1178} 1179 1180static int aead_decrypt(struct aead_request *req) 1181{ 1182 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); 1183 return aead_perform(req, 0, req->assoclen + ivsize, 1184 req->cryptlen, req->iv); 1185} 1186 1187static int aead_givencrypt(struct aead_givcrypt_request *req) 1188{ 1189 struct crypto_aead *tfm = aead_givcrypt_reqtfm(req); 1190 struct ixp_ctx *ctx = crypto_aead_ctx(tfm); 1191 unsigned len, ivsize = crypto_aead_ivsize(tfm); 1192 __be64 seq; 1193 1194 /* copied from eseqiv.c */ 1195 if (!ctx->salted) { 1196 get_random_bytes(ctx->salt, ivsize); 1197 ctx->salted = 1; 1198 } 1199 memcpy(req->areq.iv, ctx->salt, ivsize); 1200 len = ivsize; 1201 if (ivsize > sizeof(u64)) { 1202 memset(req->giv, 0, ivsize - sizeof(u64)); 1203 len = sizeof(u64); 1204 } 1205 seq = cpu_to_be64(req->seq); 1206 memcpy(req->giv + ivsize - len, &seq, len); 1207 return aead_perform(&req->areq, 1, req->areq.assoclen, 1208 req->areq.cryptlen +ivsize, req->giv); 1209} 1210 1211static struct ixp_alg ixp4xx_algos[] = { 1212{ 1213 .crypto = { 1214 .cra_name = "cbc(des)", 1215 .cra_blocksize = DES_BLOCK_SIZE, 1216 .cra_u = { .ablkcipher = { 1217 .min_keysize = DES_KEY_SIZE, 1218 .max_keysize = DES_KEY_SIZE, 1219 .ivsize = DES_BLOCK_SIZE, 1220 .geniv = "eseqiv", 1221 } 1222 } 1223 }, 1224 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192, 1225 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192, 1226 1227}, { 1228 .crypto = { 1229 .cra_name = "ecb(des)", 1230 .cra_blocksize = DES_BLOCK_SIZE, 1231 .cra_u = { .ablkcipher = { 1232 .min_keysize = DES_KEY_SIZE, 1233 .max_keysize = DES_KEY_SIZE, 1234 } 1235 } 1236 }, 1237 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192, 1238 .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192, 1239}, { 1240 .crypto = { 1241 .cra_name = "cbc(des3_ede)", 1242 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1243 .cra_u = { .ablkcipher = { 1244 .min_keysize = DES3_EDE_KEY_SIZE, 1245 .max_keysize = DES3_EDE_KEY_SIZE, 1246 .ivsize = DES3_EDE_BLOCK_SIZE, 1247 .geniv = "eseqiv", 1248 } 1249 } 1250 }, 1251 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192, 1252 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192, 1253}, { 1254 .crypto = { 1255 .cra_name = "ecb(des3_ede)", 1256 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1257 .cra_u = { .ablkcipher = { 1258 .min_keysize = DES3_EDE_KEY_SIZE, 1259 .max_keysize = DES3_EDE_KEY_SIZE, 1260 } 1261 } 1262 }, 1263 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192, 1264 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192, 1265}, { 1266 .crypto = { 1267 .cra_name = "cbc(aes)", 1268 .cra_blocksize = AES_BLOCK_SIZE, 1269 .cra_u = { .ablkcipher = { 1270 .min_keysize = AES_MIN_KEY_SIZE, 1271 .max_keysize = AES_MAX_KEY_SIZE, 1272 .ivsize = AES_BLOCK_SIZE, 1273 .geniv = "eseqiv", 1274 } 1275 } 1276 }, 1277 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC, 1278 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC, 1279}, { 1280 .crypto = { 1281 .cra_name = "ecb(aes)", 1282 .cra_blocksize = AES_BLOCK_SIZE, 1283 .cra_u = { .ablkcipher = { 1284 .min_keysize = AES_MIN_KEY_SIZE, 1285 .max_keysize = AES_MAX_KEY_SIZE, 1286 } 1287 } 1288 }, 1289 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB, 1290 .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB, 1291}, { 1292 .crypto = { 1293 .cra_name = "ctr(aes)", 1294 .cra_blocksize = AES_BLOCK_SIZE, 1295 .cra_u = { .ablkcipher = { 1296 .min_keysize = AES_MIN_KEY_SIZE, 1297 .max_keysize = AES_MAX_KEY_SIZE, 1298 .ivsize = AES_BLOCK_SIZE, 1299 .geniv = "eseqiv", 1300 } 1301 } 1302 }, 1303 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR, 1304 .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR, 1305}, { 1306 .crypto = { 1307 .cra_name = "rfc3686(ctr(aes))", 1308 .cra_blocksize = AES_BLOCK_SIZE, 1309 .cra_u = { .ablkcipher = { 1310 .min_keysize = AES_MIN_KEY_SIZE, 1311 .max_keysize = AES_MAX_KEY_SIZE, 1312 .ivsize = AES_BLOCK_SIZE, 1313 .geniv = "eseqiv", 1314 .setkey = ablk_rfc3686_setkey, 1315 .encrypt = ablk_rfc3686_crypt, 1316 .decrypt = ablk_rfc3686_crypt } 1317 } 1318 }, 1319 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR, 1320 .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR, 1321}, { 1322 .crypto = { 1323 .cra_name = "authenc(hmac(md5),cbc(des))", 1324 .cra_blocksize = DES_BLOCK_SIZE, 1325 .cra_u = { .aead = { 1326 .ivsize = DES_BLOCK_SIZE, 1327 .maxauthsize = MD5_DIGEST_SIZE, 1328 } 1329 } 1330 }, 1331 .hash = &hash_alg_md5, 1332 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192, 1333 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192, 1334}, { 1335 .crypto = { 1336 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 1337 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1338 .cra_u = { .aead = { 1339 .ivsize = DES3_EDE_BLOCK_SIZE, 1340 .maxauthsize = MD5_DIGEST_SIZE, 1341 } 1342 } 1343 }, 1344 .hash = &hash_alg_md5, 1345 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192, 1346 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192, 1347}, { 1348 .crypto = { 1349 .cra_name = "authenc(hmac(sha1),cbc(des))", 1350 .cra_blocksize = DES_BLOCK_SIZE, 1351 .cra_u = { .aead = { 1352 .ivsize = DES_BLOCK_SIZE, 1353 .maxauthsize = SHA1_DIGEST_SIZE, 1354 } 1355 } 1356 }, 1357 .hash = &hash_alg_sha1, 1358 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192, 1359 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192, 1360}, { 1361 .crypto = { 1362 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", 1363 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1364 .cra_u = { .aead = { 1365 .ivsize = DES3_EDE_BLOCK_SIZE, 1366 .maxauthsize = SHA1_DIGEST_SIZE, 1367 } 1368 } 1369 }, 1370 .hash = &hash_alg_sha1, 1371 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192, 1372 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192, 1373}, { 1374 .crypto = { 1375 .cra_name = "authenc(hmac(md5),cbc(aes))", 1376 .cra_blocksize = AES_BLOCK_SIZE, 1377 .cra_u = { .aead = { 1378 .ivsize = AES_BLOCK_SIZE, 1379 .maxauthsize = MD5_DIGEST_SIZE, 1380 } 1381 } 1382 }, 1383 .hash = &hash_alg_md5, 1384 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC, 1385 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC, 1386}, { 1387 .crypto = { 1388 .cra_name = "authenc(hmac(sha1),cbc(aes))", 1389 .cra_blocksize = AES_BLOCK_SIZE, 1390 .cra_u = { .aead = { 1391 .ivsize = AES_BLOCK_SIZE, 1392 .maxauthsize = SHA1_DIGEST_SIZE, 1393 } 1394 } 1395 }, 1396 .hash = &hash_alg_sha1, 1397 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC, 1398 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC, 1399} }; 1400 1401#define IXP_POSTFIX "-ixp4xx" 1402 1403static const struct platform_device_info ixp_dev_info __initdata = { 1404 .name = DRIVER_NAME, 1405 .id = 0, 1406 .dma_mask = DMA_BIT_MASK(32), 1407}; 1408 1409static int __init ixp_module_init(void) 1410{ 1411 int num = ARRAY_SIZE(ixp4xx_algos); 1412 int i, err; 1413 1414 pdev = platform_device_register_full(&ixp_dev_info); 1415 if (IS_ERR(pdev)) 1416 return PTR_ERR(pdev); 1417 1418 spin_lock_init(&desc_lock); 1419 spin_lock_init(&emerg_lock); 1420 1421 err = init_ixp_crypto(&pdev->dev); 1422 if (err) { 1423 platform_device_unregister(pdev); 1424 return err; 1425 } 1426 for (i=0; i< num; i++) { 1427 struct crypto_alg *cra = &ixp4xx_algos[i].crypto; 1428 1429 if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME, 1430 "%s"IXP_POSTFIX, cra->cra_name) >= 1431 CRYPTO_MAX_ALG_NAME) 1432 { 1433 continue; 1434 } 1435 if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) { 1436 continue; 1437 } 1438 if (!ixp4xx_algos[i].hash) { 1439 /* block ciphers */ 1440 cra->cra_type = &crypto_ablkcipher_type; 1441 cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1442 CRYPTO_ALG_KERN_DRIVER_ONLY | 1443 CRYPTO_ALG_ASYNC; 1444 if (!cra->cra_ablkcipher.setkey) 1445 cra->cra_ablkcipher.setkey = ablk_setkey; 1446 if (!cra->cra_ablkcipher.encrypt) 1447 cra->cra_ablkcipher.encrypt = ablk_encrypt; 1448 if (!cra->cra_ablkcipher.decrypt) 1449 cra->cra_ablkcipher.decrypt = ablk_decrypt; 1450 cra->cra_init = init_tfm_ablk; 1451 } else { 1452 /* authenc */ 1453 cra->cra_type = &crypto_aead_type; 1454 cra->cra_flags = CRYPTO_ALG_TYPE_AEAD | 1455 CRYPTO_ALG_KERN_DRIVER_ONLY | 1456 CRYPTO_ALG_ASYNC; 1457 cra->cra_aead.setkey = aead_setkey; 1458 cra->cra_aead.setauthsize = aead_setauthsize; 1459 cra->cra_aead.encrypt = aead_encrypt; 1460 cra->cra_aead.decrypt = aead_decrypt; 1461 cra->cra_aead.givencrypt = aead_givencrypt; 1462 cra->cra_init = init_tfm_aead; 1463 } 1464 cra->cra_ctxsize = sizeof(struct ixp_ctx); 1465 cra->cra_module = THIS_MODULE; 1466 cra->cra_alignmask = 3; 1467 cra->cra_priority = 300; 1468 cra->cra_exit = exit_tfm; 1469 if (crypto_register_alg(cra)) 1470 printk(KERN_ERR "Failed to register '%s'\n", 1471 cra->cra_name); 1472 else 1473 ixp4xx_algos[i].registered = 1; 1474 } 1475 return 0; 1476} 1477 1478static void __exit ixp_module_exit(void) 1479{ 1480 int num = ARRAY_SIZE(ixp4xx_algos); 1481 int i; 1482 1483 for (i=0; i< num; i++) { 1484 if (ixp4xx_algos[i].registered) 1485 crypto_unregister_alg(&ixp4xx_algos[i].crypto); 1486 } 1487 release_ixp_crypto(&pdev->dev); 1488 platform_device_unregister(pdev); 1489} 1490 1491module_init(ixp_module_init); 1492module_exit(ixp_module_exit); 1493 1494MODULE_LICENSE("GPL"); 1495MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>"); 1496MODULE_DESCRIPTION("IXP4xx hardware crypto"); 1497 1498