root/drivers/crypto/rockchip/rk3288_crypto.c


DEFINITIONS

This source file includes the following definitions:
  1. rk_crypto_enable_clk
  2. rk_crypto_disable_clk
  3. check_alignment
  4. rk_load_data
  5. rk_unload_data
  6. rk_crypto_irq_handle
  7. rk_crypto_enqueue
  8. rk_crypto_queue_task_cb
  9. rk_crypto_done_task_cb
  10. rk_crypto_register
  11. rk_crypto_unregister
  12. rk_crypto_action
  13. rk_crypto_probe
  14. rk_crypto_remove

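At a glance: requests enter through rk_crypto_enqueue() and are drained by the
queue tasklet, which hands each request to the algorithm's start() hook; the
interrupt handler acknowledges the hardware and schedules the done tasklet,
which drives update() until the request completes. rk_load_data() and
rk_unload_data() manage the DMA mappings, bouncing unaligned scatterlists
through a temporary page.
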
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
 */

#include "rk3288_crypto.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/reset.h>

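/*
 * The crypto block needs its security clock (sclk), bus clocks (aclk, hclk)
 * and DMA clock all running before any register access. Enable them in order
 * and unwind the ones already enabled on failure.
 */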
static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
{
        int err;

        err = clk_prepare_enable(dev->sclk);
        if (err) {
                dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
                        __func__, __LINE__);
                goto err_return;
        }
        err = clk_prepare_enable(dev->aclk);
        if (err) {
                dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
                        __func__, __LINE__);
                goto err_aclk;
        }
        err = clk_prepare_enable(dev->hclk);
        if (err) {
                dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
                        __func__, __LINE__);
                goto err_hclk;
        }
        err = clk_prepare_enable(dev->dmaclk);
        if (err) {
                dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
                        __func__, __LINE__);
                goto err_dmaclk;
        }
        return err;
err_dmaclk:
        clk_disable_unprepare(dev->hclk);
err_hclk:
        clk_disable_unprepare(dev->aclk);
err_aclk:
        clk_disable_unprepare(dev->sclk);
err_return:
        return err;
}

static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
        clk_disable_unprepare(dev->dmaclk);
        clk_disable_unprepare(dev->hclk);
        clk_disable_unprepare(dev->aclk);
        clk_disable_unprepare(dev->sclk);
}

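/*
 * The DMA engine can only consume a scatterlist entry directly when its
 * offset is word-aligned and its length is a multiple of align_mask (the
 * per-algorithm alignment); source and destination entries must also be
 * the same length.
 */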
static int check_alignment(struct scatterlist *sg_src,
                           struct scatterlist *sg_dst,
                           int align_mask)
{
        int in, out, align;

        in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
             IS_ALIGNED((uint32_t)sg_src->length, align_mask);
        if (!sg_dst)
                return in;
        out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
              IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
        align = in && out;

        return (align && (sg_src->length == sg_dst->length));
}

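/*
 * Prepare the next chunk of the request for DMA. When the scatterlists
 * satisfy check_alignment() they are mapped directly; otherwise the data is
 * bounced through the pre-allocated page at addr_vir, at most PAGE_SIZE
 * bytes per pass, with sg_tmp describing the bounce buffer in both
 * directions.
 */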
static int rk_load_data(struct rk_crypto_info *dev,
                        struct scatterlist *sg_src,
                        struct scatterlist *sg_dst)
{
        unsigned int count;

        /* Once a request has fallen back to the copy path, stay there. */
        if (dev->aligned)
                dev->aligned = check_alignment(sg_src, sg_dst,
                                               dev->align_size);
        if (dev->aligned) {
                count = min(dev->left_bytes, sg_src->length);
                dev->left_bytes -= count;

                if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
                        dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
                                __func__, __LINE__);
                        return -EINVAL;
                }
                dev->addr_in = sg_dma_address(sg_src);

                if (sg_dst) {
                        if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
                                dev_err(dev->dev,
                                        "[%s:%d] dma_map_sg(dst) error\n",
                                        __func__, __LINE__);
                                dma_unmap_sg(dev->dev, sg_src, 1,
                                             DMA_TO_DEVICE);
                                return -EINVAL;
                        }
                        dev->addr_out = sg_dma_address(sg_dst);
                }
        } else {
                count = (dev->left_bytes > PAGE_SIZE) ?
                        PAGE_SIZE : dev->left_bytes;

                if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
                                        dev->addr_vir, count,
                                        dev->total - dev->left_bytes)) {
                        dev_err(dev->dev, "[%s:%d] pcopy err\n",
                                __func__, __LINE__);
                        return -EINVAL;
                }
                dev->left_bytes -= count;
                sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
                if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
                        dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
                                __func__, __LINE__);
                        return -ENOMEM;
                }
                dev->addr_in = sg_dma_address(&dev->sg_tmp);

                if (sg_dst) {
                        if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
                                        DMA_FROM_DEVICE)) {
                                dev_err(dev->dev,
                                        "[%s:%d] dma_map_sg(sg_tmp) error\n",
                                        __func__, __LINE__);
                                dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
                                             DMA_TO_DEVICE);
                                return -ENOMEM;
                        }
                        dev->addr_out = sg_dma_address(&dev->sg_tmp);
                }
        }
        dev->count = count;
        return 0;
}

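/*
 * Undo the mappings made by rk_load_data(): the aligned path unmaps the
 * caller's scatterlists, the bounce path unmaps sg_tmp.
 */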
static void rk_unload_data(struct rk_crypto_info *dev)
{
        struct scatterlist *sg_in, *sg_out;

        sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
        dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);

        if (dev->sg_dst) {
                sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
                dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
        }
}

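/*
 * Interrupt handler: acknowledge the engine by writing the status bits back
 * to INTSTS, record the bits the driver treats as DMA errors (mask 0x0a),
 * and defer completion handling to the done tasklet.
 */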
static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
        struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
        u32 interrupt_status;

        spin_lock(&dev->lock);
        interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
        CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);

        if (interrupt_status & 0x0a) {
                dev_warn(dev->dev, "DMA Error\n");
                dev->err = -EFAULT;
        }
        tasklet_schedule(&dev->done_task);

        spin_unlock(&dev->lock);
        return IRQ_HANDLED;
}

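/*
 * Queue a request on the software queue. If the engine is idle, mark it busy
 * and kick the queue tasklet to start processing; if it is already busy the
 * request will be picked up when the current one completes.
 */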
static int rk_crypto_enqueue(struct rk_crypto_info *dev,
                             struct crypto_async_request *async_req)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev->lock, flags);
        ret = crypto_enqueue_request(&dev->queue, async_req);
        if (dev->busy) {
                spin_unlock_irqrestore(&dev->lock, flags);
                return ret;
        }
        dev->busy = true;
        spin_unlock_irqrestore(&dev->lock, flags);
        tasklet_schedule(&dev->queue_task);

        return ret;
}

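/*
 * Queue tasklet: dequeue the next request, notify any backlogged requester
 * that its request is now in progress, and hand the request to the
 * algorithm's start() hook.
 */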
static void rk_crypto_queue_task_cb(unsigned long data)
{
        struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        int err = 0;

        dev->err = 0;
        spin_lock_irqsave(&dev->lock, flags);
        backlog   = crypto_get_backlog(&dev->queue);
        async_req = crypto_dequeue_request(&dev->queue);

        if (!async_req) {
                dev->busy = false;
                spin_unlock_irqrestore(&dev->lock, flags);
                return;
        }
        spin_unlock_irqrestore(&dev->lock, flags);

        if (backlog) {
                backlog->complete(backlog, -EINPROGRESS);
                backlog = NULL;
        }

        dev->async_req = async_req;
        err = dev->start(dev);
        if (err)
                dev->complete(dev->async_req, err);
}

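/*
 * Done tasklet, scheduled from the interrupt handler: either propagate a DMA
 * error or let the algorithm's update() hook advance to the next chunk and
 * complete the request when finished.
 */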
static void rk_crypto_done_task_cb(unsigned long data)
{
        struct rk_crypto_info *dev = (struct rk_crypto_info *)data;

        if (dev->err) {
                dev->complete(dev->async_req, dev->err);
                return;
        }

        dev->err = dev->update(dev);
        if (dev->err)
                dev->complete(dev->async_req, dev->err);
}

static struct rk_crypto_tmp *rk_cipher_algs[] = {
        &rk_ecb_aes_alg,
        &rk_cbc_aes_alg,
        &rk_ecb_des_alg,
        &rk_cbc_des_alg,
        &rk_ecb_des3_ede_alg,
        &rk_cbc_des3_ede_alg,
        &rk_ahash_sha1,
        &rk_ahash_sha256,
        &rk_ahash_md5,
};

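/*
 * Register every algorithm backed by this engine with the crypto API,
 * dispatching on whether the entry is a cipher or an ahash. On failure,
 * unregister the entries that were already registered.
 */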
static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{
        unsigned int i, k;
        int err = 0;

        for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
                rk_cipher_algs[i]->dev = crypto_info;
                if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
                        err = crypto_register_alg(
                                        &rk_cipher_algs[i]->alg.crypto);
                else
                        err = crypto_register_ahash(
                                        &rk_cipher_algs[i]->alg.hash);
                if (err)
                        goto err_cipher_algs;
        }
        return 0;

err_cipher_algs:
        for (k = 0; k < i; k++) {
                if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
                        crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
                else
                        crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
        }
        return err;
}

static void rk_crypto_unregister(void)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
                if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
                        crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
                else
                        crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
        }
}

static void rk_crypto_action(void *data)
{
        struct rk_crypto_info *crypto_info = data;

        reset_control_assert(crypto_info->rst);
}

static const struct of_device_id crypto_of_id_table[] = {
        { .compatible = "rockchip,rk3288-crypto" },
        {}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);

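/*
 * Probe: pulse the crypto reset, map the registers, look up the clocks and
 * the interrupt, set up the two tasklets and the request queue, then
 * register the algorithms. Resources are devm-managed, so the error paths
 * only need to kill the tasklets once they exist.
 */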
static int rk_crypto_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rk_crypto_info *crypto_info;
        int err = 0;

        crypto_info = devm_kzalloc(&pdev->dev,
                                   sizeof(*crypto_info), GFP_KERNEL);
        if (!crypto_info) {
                err = -ENOMEM;
                goto err_crypto;
        }

        crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
        if (IS_ERR(crypto_info->rst)) {
                err = PTR_ERR(crypto_info->rst);
                goto err_crypto;
        }

        reset_control_assert(crypto_info->rst);
        usleep_range(10, 20);
        reset_control_deassert(crypto_info->rst);

        err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
        if (err)
                goto err_crypto;

        spin_lock_init(&crypto_info->lock);

        crypto_info->reg = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(crypto_info->reg)) {
                err = PTR_ERR(crypto_info->reg);
                goto err_crypto;
        }

        crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
        if (IS_ERR(crypto_info->aclk)) {
                err = PTR_ERR(crypto_info->aclk);
                goto err_crypto;
        }

        crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
        if (IS_ERR(crypto_info->hclk)) {
                err = PTR_ERR(crypto_info->hclk);
                goto err_crypto;
        }

        crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
        if (IS_ERR(crypto_info->sclk)) {
                err = PTR_ERR(crypto_info->sclk);
                goto err_crypto;
        }

        crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
        if (IS_ERR(crypto_info->dmaclk)) {
                err = PTR_ERR(crypto_info->dmaclk);
                goto err_crypto;
        }

        crypto_info->irq = platform_get_irq(pdev, 0);
        if (crypto_info->irq < 0) {
                /* crypto_info->dev is not set yet, so log via &pdev->dev */
                dev_warn(dev, "control interrupt is not available.\n");
                err = crypto_info->irq;
                goto err_crypto;
        }

        err = devm_request_irq(&pdev->dev, crypto_info->irq,
                               rk_crypto_irq_handle, IRQF_SHARED,
                               "rk-crypto", pdev);
        if (err) {
                dev_err(dev, "irq request failed.\n");
                goto err_crypto;
        }

        crypto_info->dev = &pdev->dev;
        platform_set_drvdata(pdev, crypto_info);

        tasklet_init(&crypto_info->queue_task,
                     rk_crypto_queue_task_cb, (unsigned long)crypto_info);
        tasklet_init(&crypto_info->done_task,
                     rk_crypto_done_task_cb, (unsigned long)crypto_info);
        crypto_init_queue(&crypto_info->queue, 50);

        crypto_info->enable_clk = rk_crypto_enable_clk;
        crypto_info->disable_clk = rk_crypto_disable_clk;
        crypto_info->load_data = rk_load_data;
        crypto_info->unload_data = rk_unload_data;
        crypto_info->enqueue = rk_crypto_enqueue;
        crypto_info->busy = false;

        err = rk_crypto_register(crypto_info);
        if (err) {
                dev_err(dev, "failed to register algorithms\n");
                goto err_register_alg;
        }

        dev_info(dev, "Crypto Accelerator successfully registered\n");
        return 0;

err_register_alg:
        tasklet_kill(&crypto_info->queue_task);
        tasklet_kill(&crypto_info->done_task);
err_crypto:
        return err;
}

static int rk_crypto_remove(struct platform_device *pdev)
{
        struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);

        rk_crypto_unregister();
        tasklet_kill(&crypto_tmp->done_task);
        tasklet_kill(&crypto_tmp->queue_task);
        return 0;
}

static struct platform_driver crypto_driver = {
        .probe          = rk_crypto_probe,
        .remove         = rk_crypto_remove,
        .driver         = {
                .name   = "rk3288-crypto",
                .of_match_table = crypto_of_id_table,
        },
};

module_platform_driver(crypto_driver);

MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
MODULE_LICENSE("GPL");
