root/drivers/crypto/mediatek/mtk-platform.c


DEFINITIONS

This source file includes the following definitions:
  1. mtk_desc_ring_link
  2. mtk_dfe_dse_buf_setup
  3. mtk_dfe_dse_state_check
  4. mtk_dfe_dse_reset
  5. mtk_cmd_desc_ring_setup
  6. mtk_res_desc_ring_setup
  7. mtk_packet_engine_setup
  8. mtk_aic_cap_check
  9. mtk_aic_init
  10. mtk_accelerator_init
  11. mtk_desc_dma_free
  12. mtk_desc_ring_alloc
  13. mtk_crypto_probe
  14. mtk_crypto_remove

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for EIP97 cryptographic accelerator.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "mtk-platform.h"

#define MTK_BURST_SIZE_MSK              GENMASK(7, 4)
#define MTK_BURST_SIZE(x)               ((x) << 4)
#define MTK_DESC_SIZE(x)                ((x) << 0)
#define MTK_DESC_OFFSET(x)              ((x) << 16)
#define MTK_DESC_FETCH_SIZE(x)          ((x) << 0)
#define MTK_DESC_FETCH_THRESH(x)        ((x) << 16)
#define MTK_DESC_OVL_IRQ_EN             BIT(25)
#define MTK_DESC_ATP_PRESENT            BIT(30)

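/*
 * Worked example (illustrative values only): composing a CDR_DESC_SIZE
 * register value from the helpers above, assuming a 4-word descriptor
 * placed at an 8-word offset with the additional token present:
 *
 *   MTK_DESC_SIZE(4) | MTK_DESC_OFFSET(8) | MTK_DESC_ATP_PRESENT
 *     = 0x00000004   | 0x00080000         | 0x40000000
 *     = 0x40080004
 */
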
#define MTK_DFSE_IDLE                   GENMASK(3, 0)
#define MTK_DFSE_THR_CTRL_EN            BIT(30)
#define MTK_DFSE_THR_CTRL_RESET         BIT(31)
#define MTK_DFSE_RING_ID(x)             (((x) >> 12) & GENMASK(3, 0))
#define MTK_DFSE_MIN_DATA(x)            ((x) << 0)
#define MTK_DFSE_MAX_DATA(x)            ((x) << 8)
#define MTK_DFE_MIN_CTRL(x)             ((x) << 16)
#define MTK_DFE_MAX_CTRL(x)             ((x) << 24)

#define MTK_IN_BUF_MIN_THRESH(x)        ((x) << 8)
#define MTK_IN_BUF_MAX_THRESH(x)        ((x) << 12)
#define MTK_OUT_BUF_MIN_THRESH(x)       ((x) << 0)
#define MTK_OUT_BUF_MAX_THRESH(x)       ((x) << 4)
#define MTK_IN_TBUF_SIZE(x)             (((x) >> 4) & GENMASK(3, 0))
#define MTK_IN_DBUF_SIZE(x)             (((x) >> 8) & GENMASK(3, 0))
#define MTK_OUT_DBUF_SIZE(x)            (((x) >> 16) & GENMASK(3, 0))
#define MTK_CMD_FIFO_SIZE(x)            (((x) >> 8) & GENMASK(3, 0))
#define MTK_RES_FIFO_SIZE(x)            (((x) >> 12) & GENMASK(3, 0))

#define MTK_PE_TK_LOC_AVL               BIT(2)
#define MTK_PE_PROC_HELD                BIT(14)
#define MTK_PE_TK_TIMEOUT_EN            BIT(22)
#define MTK_PE_INPUT_DMA_ERR            BIT(0)
#define MTK_PE_OUTPUT_DMA_ERR           BIT(1)
#define MTK_PE_PKT_PROC_ERR             BIT(2)
#define MTK_PE_PKT_TIMEOUT              BIT(3)
#define MTK_PE_FATAL_ERR                BIT(14)
#define MTK_PE_INPUT_DMA_ERR_EN         BIT(16)
#define MTK_PE_OUTPUT_DMA_ERR_EN        BIT(17)
#define MTK_PE_PKT_PROC_ERR_EN          BIT(18)
#define MTK_PE_PKT_TIMEOUT_EN           BIT(19)
#define MTK_PE_FATAL_ERR_EN             BIT(30)
#define MTK_PE_INT_OUT_EN               BIT(31)

#define MTK_HIA_SIGNATURE               ((u16)0x35ca)
#define MTK_HIA_DATA_WIDTH(x)           (((x) >> 25) & GENMASK(1, 0))
#define MTK_HIA_DMA_LENGTH(x)           (((x) >> 20) & GENMASK(4, 0))
#define MTK_CDR_STAT_CLR                GENMASK(4, 0)
#define MTK_RDR_STAT_CLR                GENMASK(7, 0)

#define MTK_AIC_INT_MSK                 GENMASK(5, 0)
#define MTK_AIC_VER_MSK                 (GENMASK(15, 0) | GENMASK(27, 20))
#define MTK_AIC_VER11                   0x011036c9
#define MTK_AIC_VER12                   0x012036c9
#define MTK_AIC_G_CLR                   GENMASK(30, 20)

/*
 * EIP97 is an integrated security subsystem that accelerates cryptographic
 * functions and protocols to offload the host processor.
 * Some important hardware modules are briefly introduced below:
 *
 * Host Interface Adapter (HIA) - the main interface between the host
 * system and the hardware subsystem. It is responsible for attaching
 * the processing engine to the specific host bus interface and provides
 * a standardized software view for offloading tasks to the engine.
 *
 * Command Descriptor Ring Manager (CDR Manager) - keeps track of how many
 * CDs the host has prepared in the CDR. It monitors the fill level of its
 * CD-FIFO and, if there's sufficient space for the next block of descriptors,
 * fires off a DMA request to fetch a block of CDs.
 *
 * Data Fetch Engine (DFE) - responsible for parsing the CD and
 * setting up the required control and packet data DMA transfers from
 * system memory to the processing engine.
 *
 * Result Descriptor Ring Manager (RDR Manager) - same as the CDR Manager,
 * but its target is result descriptors. Moreover, it also handles the RD
 * updates under control of the DSE. For each packet data segment
 * processed, the DSE triggers the RDR Manager to write the updated RD.
 * If triggered to update, the RDR Manager sets up a DMA operation to
 * copy the RD from the DSE to the correct location in the RDR.
 *
 * Data Store Engine (DSE) - responsible for parsing the prepared RD
 * and setting up the required control and packet data DMA transfers from
 * the processing engine to system memory.
 *
 * Advanced Interrupt Controllers (AICs) - receive interrupt request signals
 * from various sources and combine them into one interrupt output.
 * One AIC is used for the HIA global and processing engine interrupts;
 * the others are used for the descriptor ring interrupts.
 */
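
/*
 * A simplified sketch of the data flow described above (illustrative
 * diagram, not from the hardware documentation; host memory on the
 * left, processing engine on the right):
 *
 *   CDR --(fetch CDs)--> DFE --(ctrl/data DMA)--> processing engine
 *   RDR <--(write RDs)-- DSE <--(ctrl/data DMA)-- processing engine
 *
 * The AICs sit beside this path and funnel the ring and engine
 * interrupt sources into single interrupt outputs.
 */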

/* Cryptographic engine capabilities */
struct mtk_sys_cap {
        /* host interface adapter */
        u32 hia_ver;
        u32 hia_opt;
        /* packet engine */
        u32 pkt_eng_opt;
        /* global hardware */
        u32 hw_opt;
};

static void mtk_desc_ring_link(struct mtk_cryp *cryp, u32 mask)
{
        /* Assign rings to DFE/DSE thread and enable it */
        writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DFE_THR_CTRL);
        writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DSE_THR_CTRL);
}

static void mtk_dfe_dse_buf_setup(struct mtk_cryp *cryp,
                                  struct mtk_sys_cap *cap)
{
        u32 width = MTK_HIA_DATA_WIDTH(cap->hia_opt) + 2;
        u32 len = MTK_HIA_DMA_LENGTH(cap->hia_opt) - 1;
        u32 ipbuf = min((u32)MTK_IN_DBUF_SIZE(cap->hw_opt) + width, len);
        u32 opbuf = min((u32)MTK_OUT_DBUF_SIZE(cap->hw_opt) + width, len);
        u32 itbuf = min((u32)MTK_IN_TBUF_SIZE(cap->hw_opt) + width, len);

        writel(MTK_DFSE_MIN_DATA(ipbuf - 1) |
               MTK_DFSE_MAX_DATA(ipbuf) |
               MTK_DFE_MIN_CTRL(itbuf - 1) |
               MTK_DFE_MAX_CTRL(itbuf),
               cryp->base + DFE_CFG);

        writel(MTK_DFSE_MIN_DATA(opbuf - 1) |
               MTK_DFSE_MAX_DATA(opbuf),
               cryp->base + DSE_CFG);

        writel(MTK_IN_BUF_MIN_THRESH(ipbuf - 1) |
               MTK_IN_BUF_MAX_THRESH(ipbuf),
               cryp->base + PE_IN_DBUF_THRESH);

        writel(MTK_IN_BUF_MIN_THRESH(itbuf - 1) |
               MTK_IN_BUF_MAX_THRESH(itbuf),
               cryp->base + PE_IN_TBUF_THRESH);

        writel(MTK_OUT_BUF_MIN_THRESH(opbuf - 1) |
               MTK_OUT_BUF_MAX_THRESH(opbuf),
               cryp->base + PE_OUT_DBUF_THRESH);

        writel(0, cryp->base + PE_OUT_TBUF_THRESH);
        writel(0, cryp->base + PE_OUT_BUF_CTRL);
}
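
/*
 * Worked example for the sizing above (hypothetical register readings,
 * for illustration only): if MTK_HIA_DATA_WIDTH(cap->hia_opt) is 2 and
 * MTK_HIA_DMA_LENGTH(cap->hia_opt) is 9, then width = 4 and len = 8;
 * an input data-buffer size code of 5 in cap->hw_opt then gives
 * ipbuf = min(5 + 4, 8) = 8, so DFE_CFG is programmed with a minimum
 * data threshold of 7 and a maximum of 8.
 */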

static int mtk_dfe_dse_state_check(struct mtk_cryp *cryp)
{
        u32 val;

        /* Check for completion of all DMA transfers */
        val = readl(cryp->base + DFE_THR_STAT);
        if (MTK_DFSE_RING_ID(val) != MTK_DFSE_IDLE)
                return -EBUSY;

        val = readl(cryp->base + DSE_THR_STAT);
        if (MTK_DFSE_RING_ID(val) != MTK_DFSE_IDLE)
                return -EBUSY;

        /* Take DFE/DSE thread out of reset */
        writel(0, cryp->base + DFE_THR_CTRL);
        writel(0, cryp->base + DSE_THR_CTRL);

        return 0;
}

static int mtk_dfe_dse_reset(struct mtk_cryp *cryp)
{
        /* Reset the DFE/DSE threads and clear system priorities for all rings. */
        writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DFE_THR_CTRL);
        writel(0, cryp->base + DFE_PRIO_0);
        writel(0, cryp->base + DFE_PRIO_1);
        writel(0, cryp->base + DFE_PRIO_2);
        writel(0, cryp->base + DFE_PRIO_3);

        writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DSE_THR_CTRL);
        writel(0, cryp->base + DSE_PRIO_0);
        writel(0, cryp->base + DSE_PRIO_1);
        writel(0, cryp->base + DSE_PRIO_2);
        writel(0, cryp->base + DSE_PRIO_3);

        return mtk_dfe_dse_state_check(cryp);
}

static void mtk_cmd_desc_ring_setup(struct mtk_cryp *cryp,
                                    int i, struct mtk_sys_cap *cap)
{
        /* Number of full descriptors that fit into the CD-FIFO, minus one */
        u32 count =
                ((1 << MTK_CMD_FIFO_SIZE(cap->hia_opt)) / MTK_DESC_SZ) - 1;

        /* Temporarily disable external triggering */
        writel(0, cryp->base + CDR_CFG(i));

        /* Clear CDR count */
        writel(MTK_CNT_RST, cryp->base + CDR_PREP_COUNT(i));
        writel(MTK_CNT_RST, cryp->base + CDR_PROC_COUNT(i));

        writel(0, cryp->base + CDR_PREP_PNTR(i));
        writel(0, cryp->base + CDR_PROC_PNTR(i));
        writel(0, cryp->base + CDR_DMA_CFG(i));

        /* Configure CDR host address space */
        writel(0, cryp->base + CDR_BASE_ADDR_HI(i));
        writel(cryp->ring[i]->cmd_dma, cryp->base + CDR_BASE_ADDR_LO(i));

        writel(MTK_DESC_RING_SZ, cryp->base + CDR_RING_SIZE(i));

        /* Clear and disable all CDR interrupts */
        writel(MTK_CDR_STAT_CLR, cryp->base + CDR_STAT(i));

        /*
         * Set the command descriptor size and offset, and enable the
         * additional token present in the descriptor.
         */
        writel(MTK_DESC_SIZE(MTK_DESC_SZ) |
               MTK_DESC_OFFSET(MTK_DESC_OFF) |
               MTK_DESC_ATP_PRESENT,
               cryp->base + CDR_DESC_SIZE(i));

        writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
               MTK_DESC_FETCH_THRESH(count * MTK_DESC_SZ),
               cryp->base + CDR_CFG(i));
}
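
/*
 * Worked example for the fetch configuration above (hypothetical
 * values, for illustration only): if MTK_CMD_FIFO_SIZE(cap->hia_opt)
 * reads back 8, the CD-FIFO holds 1 << 8 = 256 words; assuming
 * MTK_DESC_SZ is 4 words, count = (256 / 4) - 1 = 63, so the HIA
 * fetches blocks of 63 descriptors and triggers a fetch once there is
 * room for 63 * MTK_DESC_SZ words in the FIFO.
 */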

static void mtk_res_desc_ring_setup(struct mtk_cryp *cryp,
                                    int i, struct mtk_sys_cap *cap)
{
        u32 rndup = 2;
        u32 count = ((1 << MTK_RES_FIFO_SIZE(cap->hia_opt)) / rndup) - 1;

        /* Temporarily disable external triggering */
        writel(0, cryp->base + RDR_CFG(i));

        /* Clear RDR count */
        writel(MTK_CNT_RST, cryp->base + RDR_PREP_COUNT(i));
        writel(MTK_CNT_RST, cryp->base + RDR_PROC_COUNT(i));

        writel(0, cryp->base + RDR_PREP_PNTR(i));
        writel(0, cryp->base + RDR_PROC_PNTR(i));
        writel(0, cryp->base + RDR_DMA_CFG(i));

        /* Configure RDR host address space */
        writel(0, cryp->base + RDR_BASE_ADDR_HI(i));
        writel(cryp->ring[i]->res_dma, cryp->base + RDR_BASE_ADDR_LO(i));

        writel(MTK_DESC_RING_SZ, cryp->base + RDR_RING_SIZE(i));

        /* Clear and disable all RDR interrupts */
        writel(MTK_RDR_STAT_CLR, cryp->base + RDR_STAT(i));

        /*
         * The RDR manager generates update interrupts on a per-completed-
         * packet basis: the rd_proc_thresh_irq interrupt fires when
         * proc_pkt_count for the RDR exceeds the programmed threshold.
         */
        writel(MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE,
               cryp->base + RDR_THRESH(i));

        /*
         * Configure the size and offset of the processed result
         * descriptors (or complete packets) that are written to
         * the RDR.
         */
        writel(MTK_DESC_SIZE(MTK_DESC_SZ) | MTK_DESC_OFFSET(MTK_DESC_OFF),
               cryp->base + RDR_DESC_SIZE(i));

        /*
         * Configure the HIA fetch size and fetch threshold that are used to
         * fetch blocks of multiple descriptors.
         */
        writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
               MTK_DESC_FETCH_THRESH(count * rndup) |
               MTK_DESC_OVL_IRQ_EN,
               cryp->base + RDR_CFG(i));
}

static int mtk_packet_engine_setup(struct mtk_cryp *cryp)
{
        struct mtk_sys_cap cap;
        int i, err;
        u32 val;

        cap.hia_ver = readl(cryp->base + HIA_VERSION);
        cap.hia_opt = readl(cryp->base + HIA_OPTIONS);
        cap.hw_opt = readl(cryp->base + EIP97_OPTIONS);

        if ((u16)cap.hia_ver != MTK_HIA_SIGNATURE)
                return -EINVAL;

        /* Configure endianness conversion method for master (DMA) interface */
        writel(0, cryp->base + EIP97_MST_CTRL);

        /* Set HIA burst size */
        val = readl(cryp->base + HIA_MST_CTRL);
        val &= ~MTK_BURST_SIZE_MSK;
        val |= MTK_BURST_SIZE(5);
        writel(val, cryp->base + HIA_MST_CTRL);

        err = mtk_dfe_dse_reset(cryp);
        if (err) {
                dev_err(cryp->dev, "Failed to reset DFE and DSE.\n");
                return err;
        }

        mtk_dfe_dse_buf_setup(cryp, &cap);

        /* Enable the four rings for the packet engine. */
        mtk_desc_ring_link(cryp, 0xf);

        for (i = 0; i < MTK_RING_MAX; i++) {
                mtk_cmd_desc_ring_setup(cryp, i, &cap);
                mtk_res_desc_ring_setup(cryp, i, &cap);
        }

        writel(MTK_PE_TK_LOC_AVL | MTK_PE_PROC_HELD | MTK_PE_TK_TIMEOUT_EN,
               cryp->base + PE_TOKEN_CTRL_STAT);

        /* Clear all pending interrupts and enable error reporting */
        writel(MTK_AIC_G_CLR, cryp->base + AIC_G_ACK);
        writel(MTK_PE_INPUT_DMA_ERR | MTK_PE_OUTPUT_DMA_ERR |
               MTK_PE_PKT_PROC_ERR | MTK_PE_PKT_TIMEOUT |
               MTK_PE_FATAL_ERR | MTK_PE_INPUT_DMA_ERR_EN |
               MTK_PE_OUTPUT_DMA_ERR_EN | MTK_PE_PKT_PROC_ERR_EN |
               MTK_PE_PKT_TIMEOUT_EN | MTK_PE_FATAL_ERR_EN |
               MTK_PE_INT_OUT_EN,
               cryp->base + PE_INTERRUPT_CTRL_STAT);

        return 0;
}

static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw)
{
        u32 val;

        if (hw == MTK_RING_MAX)
                val = readl(cryp->base + AIC_G_VERSION);
        else
                val = readl(cryp->base + AIC_VERSION(hw));

        val &= MTK_AIC_VER_MSK;
        if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12)
                return -ENXIO;

        if (hw == MTK_RING_MAX)
                val = readl(cryp->base + AIC_G_OPTIONS);
        else
                val = readl(cryp->base + AIC_OPTIONS(hw));

        val &= MTK_AIC_INT_MSK;
        if (!val || val > 32)
                return -ENXIO;

        return 0;
}

static int mtk_aic_init(struct mtk_cryp *cryp, int hw)
{
        int err;

        err = mtk_aic_cap_check(cryp, hw);
        if (err)
                return err;

        /* Disable all interrupts and set initial configuration */
        if (hw == MTK_RING_MAX) {
                writel(0, cryp->base + AIC_G_ENABLE_CTRL);
                writel(0, cryp->base + AIC_G_POL_CTRL);
                writel(0, cryp->base + AIC_G_TYPE_CTRL);
                writel(0, cryp->base + AIC_G_ENABLE_SET);
        } else {
                writel(0, cryp->base + AIC_ENABLE_CTRL(hw));
                writel(0, cryp->base + AIC_POL_CTRL(hw));
                writel(0, cryp->base + AIC_TYPE_CTRL(hw));
                writel(0, cryp->base + AIC_ENABLE_SET(hw));
        }

        return 0;
}

static int mtk_accelerator_init(struct mtk_cryp *cryp)
{
        int i, err;

        /* Initialize the advanced interrupt controllers (AICs) */
        for (i = 0; i < MTK_IRQ_NUM; i++) {
                err = mtk_aic_init(cryp, i);
                if (err) {
                        dev_err(cryp->dev, "Failed to initialize AIC.\n");
                        return err;
                }
        }

        /* Initialize the packet engine */
        err = mtk_packet_engine_setup(cryp);
        if (err) {
                dev_err(cryp->dev, "Failed to configure packet engine.\n");
                return err;
        }

        return 0;
}

static void mtk_desc_dma_free(struct mtk_cryp *cryp)
{
        int i;

        for (i = 0; i < MTK_RING_MAX; i++) {
                dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
                                  cryp->ring[i]->res_base,
                                  cryp->ring[i]->res_dma);
                dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
                                  cryp->ring[i]->cmd_base,
                                  cryp->ring[i]->cmd_dma);
                kfree(cryp->ring[i]);
        }
}

static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
{
        struct mtk_ring **ring = cryp->ring;
        int i, err = -ENOMEM;

        for (i = 0; i < MTK_RING_MAX; i++) {
                ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
                if (!ring[i])
                        goto err_cleanup;

                ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
                                                       MTK_DESC_RING_SZ,
                                                       &ring[i]->cmd_dma,
                                                       GFP_KERNEL);
                if (!ring[i]->cmd_base)
                        goto err_cleanup;

                ring[i]->res_base = dma_alloc_coherent(cryp->dev,
                                                       MTK_DESC_RING_SZ,
                                                       &ring[i]->res_dma,
                                                       GFP_KERNEL);
                if (!ring[i]->res_base)
                        goto err_cleanup;

                ring[i]->cmd_next = ring[i]->cmd_base;
                ring[i]->res_next = ring[i]->res_base;
        }
        return 0;

err_cleanup:
        /* Free the partially allocated ring before unwinding the full ones */
        if (ring[i]) {
                if (ring[i]->cmd_base)
                        dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
                                          ring[i]->cmd_base, ring[i]->cmd_dma);
                kfree(ring[i]);
        }
        while (i--) {
                dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
                                  ring[i]->res_base, ring[i]->res_dma);
                dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
                                  ring[i]->cmd_base, ring[i]->cmd_dma);
                kfree(ring[i]);
        }
        return err;
}

static int mtk_crypto_probe(struct platform_device *pdev)
{
        struct mtk_cryp *cryp;
        int i, err;

        cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL);
        if (!cryp)
                return -ENOMEM;

        cryp->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(cryp->base))
                return PTR_ERR(cryp->base);

        for (i = 0; i < MTK_IRQ_NUM; i++) {
                cryp->irq[i] = platform_get_irq(pdev, i);
                if (cryp->irq[i] < 0)
                        return cryp->irq[i];
        }

        cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp");
        if (IS_ERR(cryp->clk_cryp))
                return -EPROBE_DEFER;

        cryp->dev = &pdev->dev;
        pm_runtime_enable(cryp->dev);
        pm_runtime_get_sync(cryp->dev);

        err = clk_prepare_enable(cryp->clk_cryp);
        if (err)
                goto err_clk_cryp;

        /* Allocate four command/result descriptor rings */
        err = mtk_desc_ring_alloc(cryp);
        if (err) {
                dev_err(cryp->dev, "Unable to allocate descriptor rings.\n");
                goto err_resource;
        }

        /* Initialize hardware modules */
        err = mtk_accelerator_init(cryp);
        if (err) {
                dev_err(cryp->dev, "Failed to initialize cryptographic engine.\n");
                goto err_engine;
        }

        err = mtk_cipher_alg_register(cryp);
        if (err) {
                dev_err(cryp->dev, "Unable to register cipher algorithm.\n");
                goto err_cipher;
        }

        err = mtk_hash_alg_register(cryp);
        if (err) {
                dev_err(cryp->dev, "Unable to register hash algorithm.\n");
                goto err_hash;
        }

        platform_set_drvdata(pdev, cryp);
        return 0;

err_hash:
        mtk_cipher_alg_release(cryp);
err_cipher:
        mtk_dfe_dse_reset(cryp);
err_engine:
        mtk_desc_dma_free(cryp);
err_resource:
        clk_disable_unprepare(cryp->clk_cryp);
err_clk_cryp:
        pm_runtime_put_sync(cryp->dev);
        pm_runtime_disable(cryp->dev);

        return err;
}

static int mtk_crypto_remove(struct platform_device *pdev)
{
        struct mtk_cryp *cryp = platform_get_drvdata(pdev);

        mtk_hash_alg_release(cryp);
        mtk_cipher_alg_release(cryp);
        mtk_desc_dma_free(cryp);

        clk_disable_unprepare(cryp->clk_cryp);

        pm_runtime_put_sync(cryp->dev);
        pm_runtime_disable(cryp->dev);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static const struct of_device_id of_crypto_id[] = {
        { .compatible = "mediatek,eip97-crypto" },
        {},
};
MODULE_DEVICE_TABLE(of, of_crypto_id);

static struct platform_driver mtk_crypto_driver = {
        .probe = mtk_crypto_probe,
        .remove = mtk_crypto_remove,
        .driver = {
                .name = "mtk-crypto",
                .of_match_table = of_crypto_id,
        },
};
module_platform_driver(mtk_crypto_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ryder Lee <ryder.lee@mediatek.com>");
MODULE_DESCRIPTION("Cryptographic accelerator driver for EIP97");
