drivers/crypto/sahara.c

DEFINITIONS

This source file includes the following definitions:
  1. sahara_write
  2. sahara_read
  3. sahara_aes_key_hdr
  4. sahara_aes_data_link_hdr
  5. sahara_decode_error
  6. sahara_decode_status
  7. sahara_dump_descriptors
  8. sahara_dump_links
  9. sahara_hw_descriptor_create
  10. sahara_aes_process
  11. sahara_aes_setkey
  12. sahara_aes_crypt
  13. sahara_aes_ecb_encrypt
  14. sahara_aes_ecb_decrypt
  15. sahara_aes_cbc_encrypt
  16. sahara_aes_cbc_decrypt
  17. sahara_aes_cra_init
  18. sahara_aes_cra_exit
  19. sahara_sha_init_hdr
  20. sahara_sha_hw_links_create
  21. sahara_sha_hw_data_descriptor_create
  22. sahara_sha_hw_context_descriptor_create
  23. sahara_walk_and_recalc
  24. sahara_sha_prepare_request
  25. sahara_sha_process
  26. sahara_queue_manage
  27. sahara_sha_enqueue
  28. sahara_sha_init
  29. sahara_sha_update
  30. sahara_sha_final
  31. sahara_sha_finup
  32. sahara_sha_digest
  33. sahara_sha_export
  34. sahara_sha_import
  35. sahara_sha_cra_init
  36. sahara_irq_handler
  37. sahara_register_algs
  38. sahara_unregister_algs
  39. sahara_probe
  40. sahara_remove

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Cryptographic API.
   4  *
   5  * Support for SAHARA cryptographic accelerator.
   6  *
   7  * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
   8  * Copyright (c) 2013 Vista Silicon S.L.
   9  * Author: Javier Martin <javier.martin@vista-silicon.com>
  10  *
  11  * Based on omap-aes.c and tegra-aes.c
  12  */
  13 
  14 #include <crypto/aes.h>
  15 #include <crypto/internal/hash.h>
  16 #include <crypto/internal/skcipher.h>
  17 #include <crypto/scatterwalk.h>
  18 #include <crypto/sha.h>
  19 
  20 #include <linux/clk.h>
  21 #include <linux/crypto.h>
  22 #include <linux/interrupt.h>
  23 #include <linux/io.h>
  24 #include <linux/irq.h>
  25 #include <linux/kernel.h>
  26 #include <linux/kthread.h>
  27 #include <linux/module.h>
  28 #include <linux/mutex.h>
  29 #include <linux/of.h>
  30 #include <linux/of_device.h>
  31 #include <linux/platform_device.h>
  32 
  33 #define SHA_BUFFER_LEN          PAGE_SIZE
  34 #define SAHARA_MAX_SHA_BLOCK_SIZE       SHA256_BLOCK_SIZE
  35 
  36 #define SAHARA_NAME "sahara"
  37 #define SAHARA_VERSION_3        3
  38 #define SAHARA_VERSION_4        4
  39 #define SAHARA_TIMEOUT_MS       1000
  40 #define SAHARA_MAX_HW_DESC      2
  41 #define SAHARA_MAX_HW_LINK      20
  42 
  43 #define FLAGS_MODE_MASK         0x000f
  44 #define FLAGS_ENCRYPT           BIT(0)
  45 #define FLAGS_CBC               BIT(1)
  46 #define FLAGS_NEW_KEY           BIT(3)
  47 
  48 #define SAHARA_HDR_BASE                 0x00800000
  49 #define SAHARA_HDR_SKHA_ALG_AES 0
  50 #define SAHARA_HDR_SKHA_OP_ENC          (1 << 2)
  51 #define SAHARA_HDR_SKHA_MODE_ECB        (0 << 3)
  52 #define SAHARA_HDR_SKHA_MODE_CBC        (1 << 3)
  53 #define SAHARA_HDR_FORM_DATA            (5 << 16)
  54 #define SAHARA_HDR_FORM_KEY             (8 << 16)
  55 #define SAHARA_HDR_LLO                  (1 << 24)
  56 #define SAHARA_HDR_CHA_SKHA             (1 << 28)
  57 #define SAHARA_HDR_CHA_MDHA             (2 << 28)
  58 #define SAHARA_HDR_PARITY_BIT           (1 << 31)
  59 
  60 #define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
  61 #define SAHARA_HDR_MDHA_SET_MODE_HASH   0x208D0000
  62 #define SAHARA_HDR_MDHA_HASH            0xA0850000
  63 #define SAHARA_HDR_MDHA_STORE_DIGEST    0x20820000
  64 #define SAHARA_HDR_MDHA_ALG_SHA1        0
  65 #define SAHARA_HDR_MDHA_ALG_MD5         1
  66 #define SAHARA_HDR_MDHA_ALG_SHA256      2
  67 #define SAHARA_HDR_MDHA_ALG_SHA224      3
  68 #define SAHARA_HDR_MDHA_PDATA           (1 << 2)
  69 #define SAHARA_HDR_MDHA_HMAC            (1 << 3)
  70 #define SAHARA_HDR_MDHA_INIT            (1 << 5)
  71 #define SAHARA_HDR_MDHA_IPAD            (1 << 6)
  72 #define SAHARA_HDR_MDHA_OPAD            (1 << 7)
  73 #define SAHARA_HDR_MDHA_SWAP            (1 << 8)
  74 #define SAHARA_HDR_MDHA_MAC_FULL        (1 << 9)
  75 #define SAHARA_HDR_MDHA_SSL             (1 << 10)
  76 
  77 /* SAHARA can only process one request at a time */
  78 #define SAHARA_QUEUE_LENGTH     1
  79 
  80 #define SAHARA_REG_VERSION      0x00
  81 #define SAHARA_REG_DAR          0x04
  82 #define SAHARA_REG_CONTROL      0x08
  83 #define         SAHARA_CONTROL_SET_THROTTLE(x)  (((x) & 0xff) << 24)
  84 #define         SAHARA_CONTROL_SET_MAXBURST(x)  (((x) & 0xff) << 16)
  85 #define         SAHARA_CONTROL_RNG_AUTORSD      (1 << 7)
  86 #define         SAHARA_CONTROL_ENABLE_INT       (1 << 4)
  87 #define SAHARA_REG_CMD          0x0C
  88 #define         SAHARA_CMD_RESET                (1 << 0)
  89 #define         SAHARA_CMD_CLEAR_INT            (1 << 8)
  90 #define         SAHARA_CMD_CLEAR_ERR            (1 << 9)
  91 #define         SAHARA_CMD_SINGLE_STEP          (1 << 10)
  92 #define         SAHARA_CMD_MODE_BATCH           (1 << 16)
  93 #define         SAHARA_CMD_MODE_DEBUG           (1 << 18)
  94 #define SAHARA_REG_STATUS       0x10
  95 #define         SAHARA_STATUS_GET_STATE(x)      ((x) & 0x7)
  96 #define                 SAHARA_STATE_IDLE       0
  97 #define                 SAHARA_STATE_BUSY       1
  98 #define                 SAHARA_STATE_ERR        2
  99 #define                 SAHARA_STATE_FAULT      3
 100 #define                 SAHARA_STATE_COMPLETE   4
 101 #define                 SAHARA_STATE_COMP_FLAG  (1 << 2)
 102 #define         SAHARA_STATUS_DAR_FULL          (1 << 3)
 103 #define         SAHARA_STATUS_ERROR             (1 << 4)
 104 #define         SAHARA_STATUS_SECURE            (1 << 5)
 105 #define         SAHARA_STATUS_FAIL              (1 << 6)
 106 #define         SAHARA_STATUS_INIT              (1 << 7)
 107 #define         SAHARA_STATUS_RNG_RESEED        (1 << 8)
 108 #define         SAHARA_STATUS_ACTIVE_RNG        (1 << 9)
 109 #define         SAHARA_STATUS_ACTIVE_MDHA       (1 << 10)
 110 #define         SAHARA_STATUS_ACTIVE_SKHA       (1 << 11)
 111 #define         SAHARA_STATUS_MODE_BATCH        (1 << 16)
 112 #define         SAHARA_STATUS_MODE_DEDICATED    (1 << 17)
 113 #define         SAHARA_STATUS_MODE_DEBUG        (1 << 18)
 114 #define         SAHARA_STATUS_GET_ISTATE(x)     (((x) >> 24) & 0xff)
 115 #define SAHARA_REG_ERRSTATUS    0x14
 116 #define         SAHARA_ERRSTATUS_GET_SOURCE(x)  ((x) & 0xf)
 117 #define                 SAHARA_ERRSOURCE_CHA    14
 118 #define                 SAHARA_ERRSOURCE_DMA    15
 119 #define         SAHARA_ERRSTATUS_DMA_DIR        (1 << 8)
 120 #define         SAHARA_ERRSTATUS_GET_DMASZ(x)   (((x) >> 9) & 0x3)
 121 #define         SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
 122 #define         SAHARA_ERRSTATUS_GET_CHASRC(x)  (((x) >> 16) & 0xfff)
 123 #define         SAHARA_ERRSTATUS_GET_CHAERR(x)  (((x) >> 28) & 0x3)
 124 #define SAHARA_REG_FADDR        0x18
 125 #define SAHARA_REG_CDAR         0x1C
 126 #define SAHARA_REG_IDAR         0x20
 127 
 128 struct sahara_hw_desc {
 129         u32     hdr;    /* descriptor header, including the parity bit */
 130         u32     len1;   /* length of the first buffer/link list */
 131         u32     p1;     /* physical address of the first buffer/link list */
 132         u32     len2;   /* length of the second buffer/link list */
 133         u32     p2;     /* physical address of the second buffer/link list */
 134         u32     next;   /* physical address of the next descriptor, 0 = last */
 135 };
 136 
 137 struct sahara_hw_link {
 138         u32     len;    /* length of the buffer */
 139         u32     p;      /* physical (DMA) address of the buffer */
 140         u32     next;   /* physical address of the next link, 0 = last */
 141 };
 142 
 143 struct sahara_ctx {
 144         unsigned long flags;
 145 
 146         /* AES-specific context */
 147         int keylen;
 148         u8 key[AES_KEYSIZE_128];
 149         struct crypto_sync_skcipher *fallback;
 150 };
 151 
 152 struct sahara_aes_reqctx {
 153         unsigned long mode;
 154 };
 155 
 156 /*
 157  * struct sahara_sha_reqctx - private data per request
 158  * @buf: holds data for requests smaller than block_size
 159  * @rembuf: used to prepare one block_size-aligned request
 160  * @context: hw-specific context for request. Digest is extracted from this
 161  * @mode: specifies what type of hw-descriptor needs to be built
 162  * @digest_size: length of digest for this request
 163  * @context_size: length of hw-context for this request.
 164  *                Always digest_size + 4
 165  * @buf_cnt: number of bytes saved in buf
 166  * @sg_in_idx: non-zero when input hw links were created (input needs unmap)
 167  * @in_sg: scatterlist for input data
 168  * @in_sg_chain: scatterlists for chained input data
 169  * @total: total number of bytes for transfer
 170  * @last: is this the last block
 171  * @first: is this the first block
 172  * @active: inside a transfer
 173  */
 174 struct sahara_sha_reqctx {
 175         u8                      buf[SAHARA_MAX_SHA_BLOCK_SIZE];
 176         u8                      rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
 177         u8                      context[SHA256_DIGEST_SIZE + 4];
 178         unsigned int            mode;
 179         unsigned int            digest_size;
 180         unsigned int            context_size;
 181         unsigned int            buf_cnt;
 182         unsigned int            sg_in_idx;
 183         struct scatterlist      *in_sg;
 184         struct scatterlist      in_sg_chain[2];
 185         size_t                  total;
 186         unsigned int            last;
 187         unsigned int            first;
 188         unsigned int            active;
 189 };
 190 
 191 struct sahara_dev {
 192         struct device           *device;
 193         unsigned int            version;
 194         void __iomem            *regs_base;
 195         struct clk              *clk_ipg;
 196         struct clk              *clk_ahb;
 197         struct mutex            queue_mutex;
 198         struct task_struct      *kthread;
 199         struct completion       dma_completion;
 200 
 201         struct sahara_ctx       *ctx;
 202         struct crypto_queue     queue;
 203         unsigned long           flags;
 204 
 205         struct sahara_hw_desc   *hw_desc[SAHARA_MAX_HW_DESC];
 206         dma_addr_t              hw_phys_desc[SAHARA_MAX_HW_DESC];
 207 
 208         u8                      *key_base;
 209         dma_addr_t              key_phys_base;
 210 
 211         u8                      *iv_base;
 212         dma_addr_t              iv_phys_base;
 213 
 214         u8                      *context_base;
 215         dma_addr_t              context_phys_base;
 216 
 217         struct sahara_hw_link   *hw_link[SAHARA_MAX_HW_LINK];
 218         dma_addr_t              hw_phys_link[SAHARA_MAX_HW_LINK];
 219 
 220         size_t                  total;
 221         struct scatterlist      *in_sg;
 222         int             nb_in_sg;
 223         struct scatterlist      *out_sg;
 224         int             nb_out_sg;
 225 
 226         u32                     error;
 227 };
 228 
 229 static struct sahara_dev *dev_ptr;
 230 
 231 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
 232 {
 233         writel(data, dev->regs_base + reg);
 234 }
 235 
 236 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
 237 {
 238         return readl(dev->regs_base + reg);
 239 }
 240 
 241 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
 242 {
 243         u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
 244                         SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
 245                         SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
 246 
 247         if (dev->flags & FLAGS_CBC) {
 248                 hdr |= SAHARA_HDR_SKHA_MODE_CBC;
 249                 hdr ^= SAHARA_HDR_PARITY_BIT;
 250         }
 251 
 252         if (dev->flags & FLAGS_ENCRYPT) {
 253                 hdr |= SAHARA_HDR_SKHA_OP_ENC;
 254                 hdr ^= SAHARA_HDR_PARITY_BIT;
 255         }
 256 
 257         return hdr;
 258 }
 259 
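/*
 * Editor's note: SAHARA descriptor headers carry odd parity in bit 31
 * (SAHARA_HDR_PARITY_BIT). sahara_aes_key_hdr() above keeps the parity
 * correct incrementally, toggling the parity bit each time it sets one
 * extra mode bit, while sahara_sha_init_hdr() below recomputes it from
 * the population count. A minimal sketch of the general form (the
 * helper name is hypothetical, not part of this driver):
 */
static inline u32 sahara_hdr_set_parity(u32 hdr)
{
	/* set the parity bit iff the number of other 1-bits is even */
	if (hweight32(hdr & ~SAHARA_HDR_PARITY_BIT) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;
	else
		hdr &= ~SAHARA_HDR_PARITY_BIT;

	return hdr;
}
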
 260 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
 261 {
 262         return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
 263                         SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
 264 }
 265 
 266 static const char *sahara_err_src[16] = {
 267         "No error",
 268         "Header error",
 269         "Descriptor length error",
 270         "Descriptor length or pointer error",
 271         "Link length error",
 272         "Link pointer error",
 273         "Input buffer error",
 274         "Output buffer error",
 275         "Output buffer starvation",
 276         "Internal state fault",
 277         "General descriptor problem",
 278         "Reserved",
 279         "Descriptor address error",
 280         "Link address error",
 281         "CHA error",
 282         "DMA error"
 283 };
 284 
 285 static const char *sahara_err_dmasize[4] = {
 286         "Byte transfer",
 287         "Half-word transfer",
 288         "Word transfer",
 289         "Reserved"
 290 };
 291 
 292 static const char *sahara_err_dmasrc[8] = {
 293         "No error",
 294         "AHB bus error",
 295         "Internal IP bus error",
 296         "Parity error",
 297         "DMA crosses 256 byte boundary",
 298         "DMA is busy",
 299         "Reserved",
 300         "DMA HW error"
 301 };
 302 
 303 static const char *sahara_cha_errsrc[12] = {
 304         "Input buffer non-empty",
 305         "Illegal address",
 306         "Illegal mode",
 307         "Illegal data size",
 308         "Illegal key size",
 309         "Write during processing",
 310         "CTX read during processing",
 311         "HW error",
 312         "Input buffer disabled/underflow",
 313         "Output buffer disabled/overflow",
 314         "DES key parity error",
 315         "Reserved"
 316 };
 317 
 318 static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
 319 
 320 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
 321 {
 322         u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
 323         u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
 324 
 325         dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
 326 
 327         dev_err(dev->device, "  - %s.\n", sahara_err_src[source]);
 328 
 329         if (source == SAHARA_ERRSOURCE_DMA) {
 330                 if (error & SAHARA_ERRSTATUS_DMA_DIR)
 331                         dev_err(dev->device, "          * DMA read.\n");
 332                 else
 333                         dev_err(dev->device, "          * DMA write.\n");
 334 
 335                 dev_err(dev->device, "          * %s.\n",
 336                        sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
 337                 dev_err(dev->device, "          * %s.\n",
 338                        sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
 339         } else if (source == SAHARA_ERRSOURCE_CHA) {
 340                 dev_err(dev->device, "          * %s.\n",
 341                         sahara_cha_errsrc[chasrc]);
 342                 dev_err(dev->device, "          * %s.\n",
 343                        sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
 344         }
 345         dev_err(dev->device, "\n");
 346 }
 347 
 348 static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
 349 
 350 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
 351 {
 352         u8 state;
 353 
 354         if (!__is_defined(DEBUG))
 355                 return;
 356 
 357         state = SAHARA_STATUS_GET_STATE(status);
 358 
 359         dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
 360                 __func__, status);
 361 
 362         dev_dbg(dev->device, "  - State = %d:\n", state);
 363         if (state & SAHARA_STATE_COMP_FLAG)
 364                 dev_dbg(dev->device, "          * Descriptor completed. IRQ pending.\n");
 365 
 366         dev_dbg(dev->device, "          * %s.\n",
 367                sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
 368 
 369         if (status & SAHARA_STATUS_DAR_FULL)
 370                 dev_dbg(dev->device, "  - DAR Full.\n");
 371         if (status & SAHARA_STATUS_ERROR)
 372                 dev_dbg(dev->device, "  - Error.\n");
 373         if (status & SAHARA_STATUS_SECURE)
 374                 dev_dbg(dev->device, "  - Secure.\n");
 375         if (status & SAHARA_STATUS_FAIL)
 376                 dev_dbg(dev->device, "  - Fail.\n");
 377         if (status & SAHARA_STATUS_RNG_RESEED)
 378                 dev_dbg(dev->device, "  - RNG Reseed Request.\n");
 379         if (status & SAHARA_STATUS_ACTIVE_RNG)
 380                 dev_dbg(dev->device, "  - RNG Active.\n");
 381         if (status & SAHARA_STATUS_ACTIVE_MDHA)
 382                 dev_dbg(dev->device, "  - MDHA Active.\n");
 383         if (status & SAHARA_STATUS_ACTIVE_SKHA)
 384                 dev_dbg(dev->device, "  - SKHA Active.\n");
 385 
 386         if (status & SAHARA_STATUS_MODE_BATCH)
 387                 dev_dbg(dev->device, "  - Batch Mode.\n");
 388         else if (status & SAHARA_STATUS_MODE_DEDICATED)
 389                 dev_dbg(dev->device, "  - Dedicated Mode.\n");
 390         else if (status & SAHARA_STATUS_MODE_DEBUG)
 391                 dev_dbg(dev->device, "  - Debug Mode.\n");
 392 
 393         dev_dbg(dev->device, "  - Internal state = 0x%02x\n",
 394                SAHARA_STATUS_GET_ISTATE(status));
 395 
 396         dev_dbg(dev->device, "Current DAR: 0x%08x\n",
 397                 sahara_read(dev, SAHARA_REG_CDAR));
 398         dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
 399                 sahara_read(dev, SAHARA_REG_IDAR));
 400 }
 401 
 402 static void sahara_dump_descriptors(struct sahara_dev *dev)
 403 {
 404         int i;
 405 
 406         if (!__is_defined(DEBUG))
 407                 return;
 408 
 409         for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
 410                 dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
 411                         i, &dev->hw_phys_desc[i]);
 412                 dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
 413                 dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
 414                 dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
 415                 dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
 416                 dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
 417                 dev_dbg(dev->device, "\tnext = 0x%08x\n",
 418                         dev->hw_desc[i]->next);
 419         }
 420         dev_dbg(dev->device, "\n");
 421 }
 422 
 423 static void sahara_dump_links(struct sahara_dev *dev)
 424 {
 425         int i;
 426 
 427         if (!__is_defined(DEBUG))
 428                 return;
 429 
 430         for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
 431                 dev_dbg(dev->device, "Link (%d) (%pad):\n",
 432                         i, &dev->hw_phys_link[i]);
 433                 dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
 434                 dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
 435                 dev_dbg(dev->device, "\tnext = 0x%08x\n",
 436                         dev->hw_link[i]->next);
 437         }
 438         dev_dbg(dev->device, "\n");
 439 }
 440 
 441 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 442 {
 443         struct sahara_ctx *ctx = dev->ctx;
 444         struct scatterlist *sg;
 445         int ret;
 446         int i, j;
 447         int idx = 0;
 448 
 449         /* Copy new key if necessary */
 450         if (ctx->flags & FLAGS_NEW_KEY) {
 451                 memcpy(dev->key_base, ctx->key, ctx->keylen);
 452                 ctx->flags &= ~FLAGS_NEW_KEY;
 453 
 454                 if (dev->flags & FLAGS_CBC) {
 455                         dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
 456                         dev->hw_desc[idx]->p1 = dev->iv_phys_base;
 457                 } else {
 458                         dev->hw_desc[idx]->len1 = 0;
 459                         dev->hw_desc[idx]->p1 = 0;
 460                 }
 461                 dev->hw_desc[idx]->len2 = ctx->keylen;
 462                 dev->hw_desc[idx]->p2 = dev->key_phys_base;
 463                 dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
 464 
 465                 dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
 466 
 467                 idx++;
 468         }
 469 
 470         dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
 471         if (dev->nb_in_sg < 0) {
 472                 dev_err(dev->device, "Invalid numbers of src SG.\n");
 473                 return dev->nb_in_sg;
 474         }
 475         dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
 476         if (dev->nb_out_sg < 0) {
 477                 dev_err(dev->device, "Invalid numbers of dst SG.\n");
 478                 return dev->nb_out_sg;
 479         }
 480         if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
 481                 dev_err(dev->device, "not enough hw links (%d)\n",
 482                         dev->nb_in_sg + dev->nb_out_sg);
 483                 return -EINVAL;
 484         }
 485 
 486         ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 487                          DMA_TO_DEVICE);
 488         if (ret != dev->nb_in_sg) {
 489                 dev_err(dev->device, "couldn't map in sg\n");
 490                 return -EINVAL;
 491         }
 492         ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 493                          DMA_FROM_DEVICE);
 494         if (ret != dev->nb_out_sg) {
 495                 dev_err(dev->device, "couldn't map out sg\n");
 496                 goto unmap_in;
 497         }
 498 
 499         /* Create input links */
 500         dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
 501         sg = dev->in_sg;
 502         for (i = 0; i < dev->nb_in_sg; i++) {
 503                 dev->hw_link[i]->len = sg->length;
 504                 dev->hw_link[i]->p = sg->dma_address;
 505                 if (i == (dev->nb_in_sg - 1)) {
 506                         dev->hw_link[i]->next = 0;
 507                 } else {
 508                         dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
 509                         sg = sg_next(sg);
 510                 }
 511         }
 512 
 513         /* Create output links */
 514         dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
 515         sg = dev->out_sg;
 516         for (j = i; j < dev->nb_out_sg + i; j++) {
 517                 dev->hw_link[j]->len = sg->length;
 518                 dev->hw_link[j]->p = sg->dma_address;
 519                 if (j == (dev->nb_out_sg + i - 1)) {
 520                         dev->hw_link[j]->next = 0;
 521                 } else {
 522                         dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
 523                         sg = sg_next(sg);
 524                 }
 525         }
 526 
 527         /* Fill remaining fields of the data descriptor */
 528         dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
 529         dev->hw_desc[idx]->len1 = dev->total;
 530         dev->hw_desc[idx]->len2 = dev->total;
 531         dev->hw_desc[idx]->next = 0;
 532 
 533         sahara_dump_descriptors(dev);
 534         sahara_dump_links(dev);
 535 
 536         sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
 537 
 538         return 0;
 539 
 543 unmap_in:
 544         dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 545                 DMA_TO_DEVICE);
 546 
 547         return -EINVAL;
 548 }
 549 
 550 static int sahara_aes_process(struct ablkcipher_request *req)
 551 {
 552         struct sahara_dev *dev = dev_ptr;
 553         struct sahara_ctx *ctx;
 554         struct sahara_aes_reqctx *rctx;
 555         int ret;
 556         unsigned long timeout;
 557 
 558         /* Request is ready to be dispatched by the device */
 559         dev_dbg(dev->device,
 560                 "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
 561                 req->nbytes, req->src, req->dst);
 562 
 563         /* assign new request to device */
 564         dev->total = req->nbytes;
 565         dev->in_sg = req->src;
 566         dev->out_sg = req->dst;
 567 
 568         rctx = ablkcipher_request_ctx(req);
 569         ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
 570         rctx->mode &= FLAGS_MODE_MASK;
 571         dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 572 
 573         if ((dev->flags & FLAGS_CBC) && req->info)
 574                 memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);
 575 
 576         /* assign new context to device */
 577         dev->ctx = ctx;
 578 
 579         reinit_completion(&dev->dma_completion);
 580 
 581         ret = sahara_hw_descriptor_create(dev);
 582         if (ret)
 583                 return ret;
 584 
 585         timeout = wait_for_completion_timeout(&dev->dma_completion,
 586                                 msecs_to_jiffies(SAHARA_TIMEOUT_MS));
 587         if (!timeout)
 588                 dev_err(dev->device, "AES timeout\n");
 589 
 590         /* unmap even on timeout so the sg entries are not leaked */
 591         dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 592                 DMA_FROM_DEVICE);
 593         dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 594                 DMA_TO_DEVICE);
 595 
 596         return timeout ? 0 : -ETIMEDOUT;
 598 }
 599 
 600 static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 601                              unsigned int keylen)
 602 {
 603         struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 604         int ret;
 605 
 606         ctx->keylen = keylen;
 607 
 608         /* SAHARA only supports 128-bit keys */
 609         if (keylen == AES_KEYSIZE_128) {
 610                 memcpy(ctx->key, key, keylen);
 611                 ctx->flags |= FLAGS_NEW_KEY;
 612                 return 0;
 613         }
 614 
 615         if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
 616                 return -EINVAL;
 617 
 618         /*
 619          * The requested key size is not supported by HW, do a fallback.
 620          */
 621         crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
 622         crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
 623                                                  CRYPTO_TFM_REQ_MASK);
 624 
 625         ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
 626 
 627         tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
 628         tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
 629                                CRYPTO_TFM_RES_MASK;
 630         return ret;
 631 }
 632 
 633 static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 634 {
 635         struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 636         struct sahara_dev *dev = dev_ptr;
 637         int err = 0;
 638 
 639         dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
 640                 req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
 641 
 642         if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
 643                 dev_err(dev->device,
 644                         "request size is not exact amount of AES blocks\n");
 645                 return -EINVAL;
 646         }
 647 
 648         rctx->mode = mode;
 649 
 650         mutex_lock(&dev->queue_mutex);
 651         err = ablkcipher_enqueue_request(&dev->queue, req);
 652         mutex_unlock(&dev->queue_mutex);
 653 
 654         wake_up_process(dev->kthread);
 655 
 656         return err;
 657 }
 658 
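/*
 * Editor's sketch (hypothetical caller, not part of this driver): a
 * kernel user reaches sahara_aes_crypt() through the generic
 * ablkcipher API. Assuming a 16-byte key[] and iv[] and src/dst
 * scatterlists, roughly:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req;
 *
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);   (async, may return -EINPROGRESS)
 *
 * nbytes must be a whole number of AES blocks, as sahara_aes_crypt()
 * enforces above.
 */
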
 659 static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
 660 {
 661         struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 662                 crypto_ablkcipher_reqtfm(req));
 663         int err;
 664 
 665         if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 666                 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 667 
 668                 skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 669                 skcipher_request_set_callback(subreq, req->base.flags,
 670                                               NULL, NULL);
 671                 skcipher_request_set_crypt(subreq, req->src, req->dst,
 672                                            req->nbytes, req->info);
 673                 err = crypto_skcipher_encrypt(subreq);
 674                 skcipher_request_zero(subreq);
 675                 return err;
 676         }
 677 
 678         return sahara_aes_crypt(req, FLAGS_ENCRYPT);
 679 }
 680 
 681 static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
 682 {
 683         struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 684                 crypto_ablkcipher_reqtfm(req));
 685         int err;
 686 
 687         if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 688                 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 689 
 690                 skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 691                 skcipher_request_set_callback(subreq, req->base.flags,
 692                                               NULL, NULL);
 693                 skcipher_request_set_crypt(subreq, req->src, req->dst,
 694                                            req->nbytes, req->info);
 695                 err = crypto_skcipher_decrypt(subreq);
 696                 skcipher_request_zero(subreq);
 697                 return err;
 698         }
 699 
 700         return sahara_aes_crypt(req, 0);
 701 }
 702 
 703 static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
 704 {
 705         struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 706                 crypto_ablkcipher_reqtfm(req));
 707         int err;
 708 
 709         if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 710                 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 711 
 712                 skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 713                 skcipher_request_set_callback(subreq, req->base.flags,
 714                                               NULL, NULL);
 715                 skcipher_request_set_crypt(subreq, req->src, req->dst,
 716                                            req->nbytes, req->info);
 717                 err = crypto_skcipher_encrypt(subreq);
 718                 skcipher_request_zero(subreq);
 719                 return err;
 720         }
 721 
 722         return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
 723 }
 724 
 725 static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
 726 {
 727         struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 728                 crypto_ablkcipher_reqtfm(req));
 729         int err;
 730 
 731         if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 732                 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 733 
 734                 skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 735                 skcipher_request_set_callback(subreq, req->base.flags,
 736                                               NULL, NULL);
 737                 skcipher_request_set_crypt(subreq, req->src, req->dst,
 738                                            req->nbytes, req->info);
 739                 err = crypto_skcipher_decrypt(subreq);
 740                 skcipher_request_zero(subreq);
 741                 return err;
 742         }
 743 
 744         return sahara_aes_crypt(req, FLAGS_CBC);
 745 }
 746 
 747 static int sahara_aes_cra_init(struct crypto_tfm *tfm)
 748 {
 749         const char *name = crypto_tfm_alg_name(tfm);
 750         struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
 751 
 752         ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
 753                                               CRYPTO_ALG_NEED_FALLBACK);
 754         if (IS_ERR(ctx->fallback)) {
 755                 pr_err("Error allocating fallback algo %s\n", name);
 756                 return PTR_ERR(ctx->fallback);
 757         }
 758 
 759         tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
 760 
 761         return 0;
 762 }
 763 
 764 static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
 765 {
 766         struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
 767 
 768         crypto_free_sync_skcipher(ctx->fallback);
 769 }
 770 
 771 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
 772                               struct sahara_sha_reqctx *rctx)
 773 {
 774         u32 hdr = 0;
 775 
 776         hdr = rctx->mode;
 777 
 778         if (rctx->first) {
 779                 hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
 780                 hdr |= SAHARA_HDR_MDHA_INIT;
 781         } else {
 782                 hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
 783         }
 784 
 785         if (rctx->last)
 786                 hdr |= SAHARA_HDR_MDHA_PDATA;
 787 
 788         if (hweight_long(hdr) % 2 == 0)
 789                 hdr |= SAHARA_HDR_PARITY_BIT;
 790 
 791         return hdr;
 792 }
 793 
 794 static int sahara_sha_hw_links_create(struct sahara_dev *dev,
 795                                        struct sahara_sha_reqctx *rctx,
 796                                        int start)
 797 {
 798         struct scatterlist *sg;
 799         unsigned int i;
 800         int ret;
 801 
 802         dev->in_sg = rctx->in_sg;
 803 
 804         dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
 805         if (dev->nb_in_sg < 0) {
 806                 dev_err(dev->device, "Invalid numbers of src SG.\n");
 807                 return dev->nb_in_sg;
 808         }
 809         if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
 810                 dev_err(dev->device, "not enough hw links (%d)\n",
 811                         dev->nb_in_sg);
 812                 return -EINVAL;
 813         }
 814 
 815         sg = dev->in_sg;
 816         ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
 817         if (!ret)
 818                 return -EFAULT;
 819 
 820         for (i = start; i < dev->nb_in_sg + start; i++) {
 821                 dev->hw_link[i]->len = sg->length;
 822                 dev->hw_link[i]->p = sg->dma_address;
 823                 if (i == (dev->nb_in_sg + start - 1)) {
 824                         dev->hw_link[i]->next = 0;
 825                 } else {
 826                         dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
 827                         sg = sg_next(sg);
 828                 }
 829         }
 830 
 831         return i;
 832 }
 833 
 834 static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
 835                                                 struct sahara_sha_reqctx *rctx,
 836                                                 struct ahash_request *req,
 837                                                 int index)
 838 {
 839         unsigned int result_len;
 840         int i = index;
 841 
 842         if (rctx->first)
 843                 /* Create initial descriptor: #8 */
 844                 dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
 845         else
 846                 /* Create hash descriptor: #10. Must follow #6. */
 847                 dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
 848 
 849         dev->hw_desc[index]->len1 = rctx->total;
 850         if (dev->hw_desc[index]->len1 == 0) {
 851                 /* if len1 is 0, p1 must be 0, too */
 852                 dev->hw_desc[index]->p1 = 0;
 853                 rctx->sg_in_idx = 0;
 854         } else {
 855                 /* Create input links */
 856                 dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
 857                 i = sahara_sha_hw_links_create(dev, rctx, index);
 858 
 859                 rctx->sg_in_idx = index;
 860                 if (i < 0)
 861                         return i;
 862         }
 863 
 864         dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
 865 
 866         /* Save the context for the next operation */
 867         result_len = rctx->context_size;
 868         dev->hw_link[i]->p = dev->context_phys_base;
 869 
 870         dev->hw_link[i]->len = result_len;
 871         dev->hw_desc[index]->len2 = result_len;
 872 
 873         dev->hw_link[i]->next = 0;
 874 
 875         return 0;
 876 }
 877 
 878 /*
 879  * Load descriptor aka #6
 880  *
 881  * To load a previously saved context back to the MDHA unit
 882  *
 883  * p1: Saved Context
 884  * p2: NULL
 885  *
 886  */
 887 static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
 888                                                 struct sahara_sha_reqctx *rctx,
 889                                                 struct ahash_request *req,
 890                                                 int index)
 891 {
 892         dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
 893 
 894         dev->hw_desc[index]->len1 = rctx->context_size;
 895         dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
 896         dev->hw_desc[index]->len2 = 0;
 897         dev->hw_desc[index]->p2 = 0;
 898 
 899         dev->hw_link[index]->len = rctx->context_size;
 900         dev->hw_link[index]->p = dev->context_phys_base;
 901         dev->hw_link[index]->next = 0;
 902 
 903         return 0;
 904 }
 905 
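/*
 * Editor's note: sahara_sha_process() below combines these builders
 * into one of two descriptor chains (sketch inferred from that code):
 *
 *   first request:  hw_desc[0] = init + hash data, save context -> end
 *   later requests: hw_desc[0] = #6, reload the saved context
 *                   hw_desc[1] = #10, hash data, save context -> end
 */
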
 906 static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
 907 {
 908         if (!sg || !sg->length)
 909                 return nbytes;
 910 
 911         while (nbytes && sg) {
 912                 if (nbytes <= sg->length) {
 913                         sg->length = nbytes;
 914                         sg_mark_end(sg);
 915                         break;
 916                 }
 917                 nbytes -= sg->length;
 918                 sg = sg_next(sg);
 919         }
 920 
 921         return nbytes;
 922 }
 923 
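/*
 * Editor's example: for a two-entry scatterlist of 64 + 64 bytes and
 * nbytes = 96, the loop above leaves the first entry untouched, trims
 * the second entry to 32 bytes and marks it as the end of the list, so
 * the list covers exactly the 96 bytes to be processed.
 */
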
 924 static int sahara_sha_prepare_request(struct ahash_request *req)
 925 {
 926         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 927         struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
 928         unsigned int hash_later;
 929         unsigned int block_size;
 930         unsigned int len;
 931 
 932         block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
 933 
 934         /* append bytes from previous operation */
 935         len = rctx->buf_cnt + req->nbytes;
 936 
 937         /* only the last transfer can be padded in hardware */
 938         if (!rctx->last && (len < block_size)) {
 939                 /* too little data, save it for the next operation */
 940                 scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
 941                                          0, req->nbytes, 0);
 942                 rctx->buf_cnt += req->nbytes;
 943 
 944                 return 0;
 945         }
 946 
 947         /* add data from previous operation first */
 948         if (rctx->buf_cnt)
 949                 memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
 950 
 951         /* data must always be a multiple of block_size */
 952         hash_later = rctx->last ? 0 : len & (block_size - 1);
 953         if (hash_later) {
 954                 unsigned int offset = req->nbytes - hash_later;
 955                 /* Save remaining bytes for later use */
 956                 scatterwalk_map_and_copy(rctx->buf, req->src, offset,
 957                                         hash_later, 0);
 958         }
 959 
 960         /* buffered + remaining new data is now a multiple of block_size */
 961         req->nbytes = req->nbytes - hash_later;
 962 
 963         sahara_walk_and_recalc(req->src, req->nbytes);
 964 
 965         /* have data from previous operation and current */
 966         if (rctx->buf_cnt && req->nbytes) {
 967                 sg_init_table(rctx->in_sg_chain, 2);
 968                 sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
 969 
 970                 sg_chain(rctx->in_sg_chain, 2, req->src);
 971 
 972                 rctx->total = req->nbytes + rctx->buf_cnt;
 973                 rctx->in_sg = rctx->in_sg_chain;
 974 
 975                 req->src = rctx->in_sg_chain;
 976         /* only data from previous operation */
 977         } else if (rctx->buf_cnt) {
 978                 if (req->src)
 979                         rctx->in_sg = req->src;
 980                 else
 981                         rctx->in_sg = rctx->in_sg_chain;
 982                 /* buf was copied into rembuf above */
 983                 sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
 984                 rctx->total = rctx->buf_cnt;
 985         /* no data from previous operation */
 986         } else {
 987                 rctx->in_sg = req->src;
 988                 rctx->total = req->nbytes;
 989                 req->src = rctx->in_sg;
 990         }
 991 
 992         /* on next call, we only have the remaining data in the buffer */
 993         rctx->buf_cnt = hash_later;
 994 
 995         return -EINPROGRESS;
 996 }
 997 
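/*
 * Editor's worked example for the buffering above (block_size = 64,
 * i.e. SHA-1/SHA-256): with 20 bytes left over in rctx->buf from the
 * previous update and a new 100-byte request, len = 120, so
 * hash_later = 120 % 64 = 56. The last 56 bytes of req->src are saved
 * in rctx->buf, req->nbytes becomes 44, and rembuf (the 20 old bytes)
 * is chained in front of the trimmed req->src, giving rctx->total = 64:
 * exactly one block goes to the hardware, 56 bytes wait in the buffer.
 */
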
 998 static int sahara_sha_process(struct ahash_request *req)
 999 {
1000         struct sahara_dev *dev = dev_ptr;
1001         struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1002         int ret;
1003         unsigned long timeout;
1004 
1005         ret = sahara_sha_prepare_request(req);
1006         if (!ret)
1007                 return ret;     /* data was buffered for a later request */
1008 
1009         if (rctx->first) {
1010                 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
1011                 dev->hw_desc[0]->next = 0;
1012                 rctx->first = 0;
1013         } else {
1014                 memcpy(dev->context_base, rctx->context, rctx->context_size);
1015 
1016                 sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1017                 dev->hw_desc[0]->next = dev->hw_phys_desc[1];
1018                 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
1019                 dev->hw_desc[1]->next = 0;
1020         }
1021 
1022         sahara_dump_descriptors(dev);
1023         sahara_dump_links(dev);
1024 
1025         reinit_completion(&dev->dma_completion);
1026 
1027         sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1028 
1029         timeout = wait_for_completion_timeout(&dev->dma_completion,
1030                                 msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1031         if (!timeout)
1032                 dev_err(dev->device, "SHA timeout\n");
1033 
1034         /* unmap even on timeout so the sg entries are not leaked */
1035         if (rctx->sg_in_idx)
1036                 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1037                              DMA_TO_DEVICE);
1038         if (!timeout)
1039                 return -ETIMEDOUT;
1040         memcpy(rctx->context, dev->context_base, rctx->context_size);
1041 
1042         if (req->result)
1043                 memcpy(req->result, rctx->context, rctx->digest_size);
1044 
1045         return 0;
1046 }
1047 
1048 static int sahara_queue_manage(void *data)
1049 {
1050         struct sahara_dev *dev = (struct sahara_dev *)data;
1051         struct crypto_async_request *async_req;
1052         struct crypto_async_request *backlog;
1053         int ret = 0;
1054 
1055         do {
1056                 __set_current_state(TASK_INTERRUPTIBLE);
1057 
1058                 mutex_lock(&dev->queue_mutex);
1059                 backlog = crypto_get_backlog(&dev->queue);
1060                 async_req = crypto_dequeue_request(&dev->queue);
1061                 mutex_unlock(&dev->queue_mutex);
1062 
1063                 if (backlog)
1064                         backlog->complete(backlog, -EINPROGRESS);
1065 
1066                 if (async_req) {
1067                         if (crypto_tfm_alg_type(async_req->tfm) ==
1068                             CRYPTO_ALG_TYPE_AHASH) {
1069                                 struct ahash_request *req =
1070                                         ahash_request_cast(async_req);
1071 
1072                                 ret = sahara_sha_process(req);
1073                         } else {
1074                                 struct ablkcipher_request *req =
1075                                         ablkcipher_request_cast(async_req);
1076 
1077                                 ret = sahara_aes_process(req);
1078                         }
1079 
1080                         async_req->complete(async_req, ret);
1081 
1082                         continue;
1083                 }
1084 
1085                 schedule();
1086         } while (!kthread_should_stop());
1087 
1088         return 0;
1089 }
1090 
1091 static int sahara_sha_enqueue(struct ahash_request *req, int last)
1092 {
1093         struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1094         struct sahara_dev *dev = dev_ptr;
1095         int ret;
1096 
1097         if (!req->nbytes && !last)
1098                 return 0;
1099 
1100         rctx->last = last;
1101 
1102         if (!rctx->active) {
1103                 rctx->active = 1;
1104                 rctx->first = 1;
1105         }
1106 
1107         mutex_lock(&dev->queue_mutex);
1108         ret = crypto_enqueue_request(&dev->queue, &req->base);
1109         mutex_unlock(&dev->queue_mutex);
1110 
1111         wake_up_process(dev->kthread);
1112 
1113         return ret;
1114 }
1115 
1116 static int sahara_sha_init(struct ahash_request *req)
1117 {
1118         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1119         struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1120 
1121         memset(rctx, 0, sizeof(*rctx));
1122 
1123         switch (crypto_ahash_digestsize(tfm)) {
1124         case SHA1_DIGEST_SIZE:
1125                 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1126                 rctx->digest_size = SHA1_DIGEST_SIZE;
1127                 break;
1128         case SHA256_DIGEST_SIZE:
1129                 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1130                 rctx->digest_size = SHA256_DIGEST_SIZE;
1131                 break;
1132         default:
1133                 return -EINVAL;
1134         }
1135 
1136         rctx->context_size = rctx->digest_size + 4;
1137         rctx->active = 0;
1138 
1139         return 0;
1140 }
1141 
1142 static int sahara_sha_update(struct ahash_request *req)
1143 {
1144         return sahara_sha_enqueue(req, 0);
1145 }
1146 
1147 static int sahara_sha_final(struct ahash_request *req)
1148 {
1149         req->nbytes = 0;
1150         return sahara_sha_enqueue(req, 1);
1151 }
1152 
1153 static int sahara_sha_finup(struct ahash_request *req)
1154 {
1155         return sahara_sha_enqueue(req, 1);
1156 }
1157 
1158 static int sahara_sha_digest(struct ahash_request *req)
1159 {
1160         sahara_sha_init(req);
1161 
1162         return sahara_sha_finup(req);
1163 }
1164 
1165 static int sahara_sha_export(struct ahash_request *req, void *out)
1166 {
1167         struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1168 
1169         memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1170 
1171         return 0;
1172 }
1173 
1174 static int sahara_sha_import(struct ahash_request *req, const void *in)
1175 {
1176         struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1177 
1178         memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1179 
1180         return 0;
1181 }
1182 
1183 static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1184 {
1185         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1186                                  sizeof(struct sahara_sha_reqctx) +
1187                                  SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
1188 
1189         return 0;
1190 }
1191 
1192 static struct crypto_alg aes_algs[] = {
1193 {
1194         .cra_name               = "ecb(aes)",
1195         .cra_driver_name        = "sahara-ecb-aes",
1196         .cra_priority           = 300,
1197         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
1198                         CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1199         .cra_blocksize          = AES_BLOCK_SIZE,
1200         .cra_ctxsize            = sizeof(struct sahara_ctx),
1201         .cra_alignmask          = 0x0,
1202         .cra_type               = &crypto_ablkcipher_type,
1203         .cra_module             = THIS_MODULE,
1204         .cra_init               = sahara_aes_cra_init,
1205         .cra_exit               = sahara_aes_cra_exit,
1206         .cra_u.ablkcipher = {
1207                 .min_keysize    = AES_MIN_KEY_SIZE,
1208                 .max_keysize    = AES_MAX_KEY_SIZE,
1209                 .setkey         = sahara_aes_setkey,
1210                 .encrypt        = sahara_aes_ecb_encrypt,
1211                 .decrypt        = sahara_aes_ecb_decrypt,
1212         }
1213 }, {
1214         .cra_name               = "cbc(aes)",
1215         .cra_driver_name        = "sahara-cbc-aes",
1216         .cra_priority           = 300,
1217         .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
1218                         CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1219         .cra_blocksize          = AES_BLOCK_SIZE,
1220         .cra_ctxsize            = sizeof(struct sahara_ctx),
1221         .cra_alignmask          = 0x0,
1222         .cra_type               = &crypto_ablkcipher_type,
1223         .cra_module             = THIS_MODULE,
1224         .cra_init               = sahara_aes_cra_init,
1225         .cra_exit               = sahara_aes_cra_exit,
1226         .cra_u.ablkcipher = {
1227                 .min_keysize    = AES_MIN_KEY_SIZE,
1228                 .max_keysize    = AES_MAX_KEY_SIZE,
1229                 .ivsize         = AES_BLOCK_SIZE,
1230                 .setkey         = sahara_aes_setkey,
1231                 .encrypt        = sahara_aes_cbc_encrypt,
1232                 .decrypt        = sahara_aes_cbc_decrypt,
1233         }
1234 }
1235 };
1236 
1237 static struct ahash_alg sha_v3_algs[] = {
1238 {
1239         .init           = sahara_sha_init,
1240         .update         = sahara_sha_update,
1241         .final          = sahara_sha_final,
1242         .finup          = sahara_sha_finup,
1243         .digest         = sahara_sha_digest,
1244         .export         = sahara_sha_export,
1245         .import         = sahara_sha_import,
1246         .halg.digestsize        = SHA1_DIGEST_SIZE,
1247         .halg.statesize         = sizeof(struct sahara_sha_reqctx),
1248         .halg.base      = {
1249                 .cra_name               = "sha1",
1250                 .cra_driver_name        = "sahara-sha1",
1251                 .cra_priority           = 300,
1252                 .cra_flags              = CRYPTO_ALG_ASYNC |
1253                                                 CRYPTO_ALG_NEED_FALLBACK,
1254                 .cra_blocksize          = SHA1_BLOCK_SIZE,
1255                 .cra_ctxsize            = sizeof(struct sahara_ctx),
1256                 .cra_alignmask          = 0,
1257                 .cra_module             = THIS_MODULE,
1258                 .cra_init               = sahara_sha_cra_init,
1259         }
1260 },
1261 };
1262 
1263 static struct ahash_alg sha_v4_algs[] = {
1264 {
1265         .init           = sahara_sha_init,
1266         .update         = sahara_sha_update,
1267         .final          = sahara_sha_final,
1268         .finup          = sahara_sha_finup,
1269         .digest         = sahara_sha_digest,
1270         .export         = sahara_sha_export,
1271         .import         = sahara_sha_import,
1272         .halg.digestsize        = SHA256_DIGEST_SIZE,
1273         .halg.statesize         = sizeof(struct sahara_sha_reqctx),
1274         .halg.base      = {
1275                 .cra_name               = "sha256",
1276                 .cra_driver_name        = "sahara-sha256",
1277                 .cra_priority           = 300,
1278                 .cra_flags              = CRYPTO_ALG_ASYNC |
1279                                                 CRYPTO_ALG_NEED_FALLBACK,
1280                 .cra_blocksize          = SHA256_BLOCK_SIZE,
1281                 .cra_ctxsize            = sizeof(struct sahara_ctx),
1282                 .cra_alignmask          = 0,
1283                 .cra_module             = THIS_MODULE,
1284                 .cra_init               = sahara_sha_cra_init,
1285         }
1286 },
1287 };
1288 
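/*
 * Editor's note on the completion handshake: the IRQ handler below
 * clears the interrupt/error bits, records the outcome in dev->error
 * (0 on SAHARA_STATE_COMPLETE, -EINVAL otherwise) and completes
 * dev->dma_completion, which wakes the sahara_aes_process() or
 * sahara_sha_process() call waiting in the sahara_queue_manage()
 * kthread.
 */
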
1289 static irqreturn_t sahara_irq_handler(int irq, void *data)
1290 {
1291         struct sahara_dev *dev = (struct sahara_dev *)data;
1292         unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1293         unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1294 
1295         sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1296                      SAHARA_REG_CMD);
1297 
1298         sahara_decode_status(dev, stat);
1299 
1300         if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1301                 return IRQ_NONE;
1302         } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1303                 dev->error = 0;
1304         } else {
1305                 sahara_decode_error(dev, err);
1306                 dev->error = -EINVAL;
1307         }
1308 
1309         complete(&dev->dma_completion);
1310 
1311         return IRQ_HANDLED;
1312 }
1313 
1315 static int sahara_register_algs(struct sahara_dev *dev)
1316 {
1317         int err;
1318         unsigned int i, j, k, l;
1319 
1320         for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1321                 err = crypto_register_alg(&aes_algs[i]);
1322                 if (err)
1323                         goto err_aes_algs;
1324         }
1325 
1326         for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1327                 err = crypto_register_ahash(&sha_v3_algs[k]);
1328                 if (err)
1329                         goto err_sha_v3_algs;
1330         }
1331 
1332         if (dev->version > SAHARA_VERSION_3)
1333                 for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1334                         err = crypto_register_ahash(&sha_v4_algs[l]);
1335                         if (err)
1336                                 goto err_sha_v4_algs;
1337                 }
1338 
1339         return 0;
1340 
1341 err_sha_v4_algs:
1342         for (j = 0; j < l; j++)
1343                 crypto_unregister_ahash(&sha_v4_algs[j]);
1344 
1345 err_sha_v3_algs:
1346         for (j = 0; j < k; j++)
1347                 crypto_unregister_ahash(&sha_v3_algs[j]);
1348 
1349 err_aes_algs:
1350         for (j = 0; j < i; j++)
1351                 crypto_unregister_alg(&aes_algs[j]);
1352 
1353         return err;
1354 }
1355 
1356 static void sahara_unregister_algs(struct sahara_dev *dev)
1357 {
1358         unsigned int i;
1359 
1360         for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1361                 crypto_unregister_alg(&aes_algs[i]);
1362 
1363         for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1364                 crypto_unregister_ahash(&sha_v3_algs[i]);
1365 
1366         if (dev->version > SAHARA_VERSION_3)
1367                 for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1368                         crypto_unregister_ahash(&sha_v4_algs[i]);
1369 }
1370 
1371 static const struct platform_device_id sahara_platform_ids[] = {
1372         { .name = "sahara-imx27" },
1373         { /* sentinel */ }
1374 };
1375 MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
1376 
1377 static const struct of_device_id sahara_dt_ids[] = {
1378         { .compatible = "fsl,imx53-sahara" },
1379         { .compatible = "fsl,imx27-sahara" },
1380         { /* sentinel */ }
1381 };
1382 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1383 
1384 static int sahara_probe(struct platform_device *pdev)
1385 {
1386         struct sahara_dev *dev;
1387         u32 version;
1388         int irq;
1389         int err;
1390         int i;
1391 
1392         dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1393         if (!dev)
1394                 return -ENOMEM;
1395 
1396         dev->device = &pdev->dev;
1397         platform_set_drvdata(pdev, dev);
1398 
1399         /* Get the base address */
1400         dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1401         if (IS_ERR(dev->regs_base))
1402                 return PTR_ERR(dev->regs_base);
1403 
1404         /* Get the IRQ */
1405         irq = platform_get_irq(pdev, 0);
1406         if (irq < 0)
1407                 return irq;
1408 
1409         err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1410                                0, dev_name(&pdev->dev), dev);
1411         if (err) {
1412                 dev_err(&pdev->dev, "failed to request irq\n");
1413                 return err;
1414         }
1415 
1416         /* clocks */
1417         dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1418         if (IS_ERR(dev->clk_ipg)) {
1419                 dev_err(&pdev->dev, "Could not get ipg clock\n");
1420                 return PTR_ERR(dev->clk_ipg);
1421         }
1422 
1423         dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1424         if (IS_ERR(dev->clk_ahb)) {
1425                 dev_err(&pdev->dev, "Could not get ahb clock\n");
1426                 return PTR_ERR(dev->clk_ahb);
1427         }
1428 
1429         /* Allocate HW descriptors */
1430         dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1431                         SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1432                         &dev->hw_phys_desc[0], GFP_KERNEL);
1433         if (!dev->hw_desc[0]) {
1434                 dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1435                 return -ENOMEM;
1436         }
1437         dev->hw_desc[1] = dev->hw_desc[0] + 1;
1438         dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1439                                 sizeof(struct sahara_hw_desc);
1440 
1441         /* Allocate space for iv and key */
1442         dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1443                                 &dev->key_phys_base, GFP_KERNEL);
1444         if (!dev->key_base) {
1445                 dev_err(&pdev->dev, "Could not allocate memory for key\n");
1446                 return -ENOMEM;
1447         }
1448         dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1449         dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1450 
1451         /* Allocate space for context: largest digest + message length field */
1452         dev->context_base = dmam_alloc_coherent(&pdev->dev,
1453                                         SHA256_DIGEST_SIZE + 4,
1454                                         &dev->context_phys_base, GFP_KERNEL);
1455         if (!dev->context_base) {
1456                 dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1457                 return -ENOMEM;
1458         }
1459 
1460         /* Allocate space for HW links */
1461         dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1462                         SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1463                         &dev->hw_phys_link[0], GFP_KERNEL);
1464         if (!dev->hw_link[0]) {
1465                 dev_err(&pdev->dev, "Could not allocate hw links\n");
1466                 return -ENOMEM;
1467         }
1468         for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1469                 dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1470                                         sizeof(struct sahara_hw_link);
1471                 dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1472         }
1473 
1474         crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1475 
1476         mutex_init(&dev->queue_mutex);
1477 
1478         dev_ptr = dev;
1479 
1480         dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1481         if (IS_ERR(dev->kthread))
1482                 return PTR_ERR(dev->kthread);
1484 
1485         init_completion(&dev->dma_completion);
1486 
1487         err = clk_prepare_enable(dev->clk_ipg);
1488         if (err)
1489                 return err;
1490         err = clk_prepare_enable(dev->clk_ahb);
1491         if (err)
1492                 goto clk_ipg_disable;
1493 
1494         version = sahara_read(dev, SAHARA_REG_VERSION);
1495         if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1496                 if (version != SAHARA_VERSION_3)
1497                         err = -ENODEV;
1498         } else if (of_device_is_compatible(pdev->dev.of_node,
1499                         "fsl,imx53-sahara")) {
1500                 if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1501                         err = -ENODEV;
1502                 version = (version >> 8) & 0xff;
1503         }
1504         if (err == -ENODEV) {
1505                 dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1506                                 version);
1507                 goto err_algs;
1508         }
1509 
1510         dev->version = version;
1511 
1512         sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1513                      SAHARA_REG_CMD);
1514         sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1515                         SAHARA_CONTROL_SET_MAXBURST(8) |
1516                         SAHARA_CONTROL_RNG_AUTORSD |
1517                         SAHARA_CONTROL_ENABLE_INT,
1518                         SAHARA_REG_CONTROL);
1519 
1520         err = sahara_register_algs(dev);
1521         if (err)
1522                 goto err_algs;
1523 
1524         dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1525 
1526         return 0;
1527 
1528 err_algs:
1529         kthread_stop(dev->kthread);
1530         dev_ptr = NULL;
1531         clk_disable_unprepare(dev->clk_ahb);
1532 clk_ipg_disable:
1533         clk_disable_unprepare(dev->clk_ipg);
1534 
1535         return err;
1536 }
1537 
1538 static int sahara_remove(struct platform_device *pdev)
1539 {
1540         struct sahara_dev *dev = platform_get_drvdata(pdev);
1541 
1542         kthread_stop(dev->kthread);
1543 
1544         sahara_unregister_algs(dev);
1545 
1546         clk_disable_unprepare(dev->clk_ipg);
1547         clk_disable_unprepare(dev->clk_ahb);
1548 
1549         dev_ptr = NULL;
1550 
1551         return 0;
1552 }
1553 
1554 static struct platform_driver sahara_driver = {
1555         .probe          = sahara_probe,
1556         .remove         = sahara_remove,
1557         .driver         = {
1558                 .name   = SAHARA_NAME,
1559                 .of_match_table = sahara_dt_ids,
1560         },
1561         .id_table = sahara_platform_ids,
1562 };
1563 
1564 module_platform_driver(sahara_driver);
1565 
1566 MODULE_LICENSE("GPL");
1567 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1568 MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1569 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
