root/drivers/crypto/qce/core.c


DEFINITIONS

This source file includes the following definitions:
  1. qce_unregister_algs
  2. qce_register_algs
  3. qce_handle_request
  4. qce_handle_queue
  5. qce_tasklet_req_done
  6. qce_async_request_enqueue
  7. qce_async_request_done
  8. qce_check_version
  9. qce_crypto_probe
  10. qce_crypto_remove

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"

#define QCE_MAJOR_VERSION5      0x05
#define QCE_QUEUE_LENGTH        1

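/*
 * The algorithm implementations this core dispatches to. Each entry
 * provides register/unregister callbacks and an async request handler
 * for one request type (ablkcipher and ahash).
 */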
static const struct qce_algo_ops *qce_ops[] = {
        &ablkcipher_ops,
        &ahash_ops,
};

static void qce_unregister_algs(struct qce_device *qce)
{
        const struct qce_algo_ops *ops;
        int i;

        for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
                ops = qce_ops[i];
                ops->unregister_algs(qce);
        }
}

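/*
 * Register the algorithms of every ops entry, stopping at the first
 * failure. Algorithms registered by earlier entries are not rolled
 * back here; the probe path treats any failure as fatal.
 */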
static int qce_register_algs(struct qce_device *qce)
{
        const struct qce_algo_ops *ops;
        int i, ret = -ENODEV;

        for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
                ops = qce_ops[i];
                ret = ops->register_algs(qce);
                if (ret)
                        break;
        }

        return ret;
}

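/*
 * Hand one dequeued request to the ops entry whose ->type matches the
 * transform's algorithm type; returns -EINVAL if no entry claims it.
 */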
static int qce_handle_request(struct crypto_async_request *async_req)
{
        int ret = -EINVAL, i;
        const struct qce_algo_ops *ops;
        u32 type = crypto_tfm_alg_type(async_req->tfm);

        for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
                ops = qce_ops[i];
                if (type != ops->type)
                        continue;
                ret = ops->async_req_handle(async_req);
                break;
        }

        return ret;
}

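/*
 * Enqueue @req (if non-NULL) and, unless a request is already in
 * flight, pull the next request off the queue and hand it to the
 * matching ops handler. Only one request runs at a time; with
 * QCE_QUEUE_LENGTH of 1 further submitters are backlogged or refused
 * by crypto_enqueue_request(). The return value is the enqueue status
 * of @req, not the outcome of the dequeued request: a handler error is
 * reported through qce->result and the done tasklet instead.
 */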
static int qce_handle_queue(struct qce_device *qce,
                            struct crypto_async_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        int ret = 0, err;

        spin_lock_irqsave(&qce->lock, flags);

        if (req)
                ret = crypto_enqueue_request(&qce->queue, req);

        /* busy, do not dequeue request */
        if (qce->req) {
                spin_unlock_irqrestore(&qce->lock, flags);
                return ret;
        }

        backlog = crypto_get_backlog(&qce->queue);
        async_req = crypto_dequeue_request(&qce->queue);
        if (async_req)
                qce->req = async_req;

        spin_unlock_irqrestore(&qce->lock, flags);

        if (!async_req)
                return ret;

        if (backlog) {
                spin_lock_bh(&qce->lock);
                backlog->complete(backlog, -EINPROGRESS);
                spin_unlock_bh(&qce->lock);
        }

        err = qce_handle_request(async_req);
        if (err) {
                qce->result = err;
                tasklet_schedule(&qce->done_tasklet);
        }

        return ret;
}

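/*
 * Done tasklet: detach the completed request under the lock, invoke
 * its completion callback with the stored result, then restart the
 * queue so the next pending request is issued.
 */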
static void qce_tasklet_req_done(unsigned long data)
{
        struct qce_device *qce = (struct qce_device *)data;
        struct crypto_async_request *req;
        unsigned long flags;

        spin_lock_irqsave(&qce->lock, flags);
        req = qce->req;
        qce->req = NULL;
        spin_unlock_irqrestore(&qce->lock, flags);

        if (req)
                req->complete(req, qce->result);

        qce_handle_queue(qce, NULL);
}

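/*
 * These two helpers are published to the algorithm implementations as
 * qce->async_req_enqueue and qce->async_req_done (wired up in probe).
 * A sketch of the expected flow from an algorithm's crypt path, with
 * illustrative names (the real callers live in cipher.c and sha.c):
 *
 *      static int my_crypt(struct ablkcipher_request *req)
 *      {
 *              struct qce_device *qce = my_tmpl_to_qce(req);
 *
 *              return qce->async_req_enqueue(qce, &req->base);
 *      }
 *
 * and, once the hardware completion fires:
 *
 *      qce->async_req_done(qce, error);
 */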
static int qce_async_request_enqueue(struct qce_device *qce,
                                     struct crypto_async_request *req)
{
        return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
        qce->result = ret;
        tasklet_schedule(&qce->done_tasklet);
}

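/*
 * Read back the hardware version; only 5.x parts with a non-zero minor
 * are supported. Also sets the BAM burst size and the pipe pair this
 * driver uses.
 */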
static int qce_check_version(struct qce_device *qce)
{
        u32 major, minor, step;

        qce_get_version(qce, &major, &minor, &step);

        /*
         * the driver does not support v5 with minor 0 because it has special
         * alignment requirements.
         */
        if (major != QCE_MAJOR_VERSION5 || minor == 0)
                return -ENODEV;

        qce->burst_size = QCE_BAM_BURST_SIZE;
        qce->pipe_pair_id = 1;

        dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
                major, minor, step);

        return 0;
}

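/*
 * Probe order: map the register block, set a 32-bit DMA mask, acquire
 * and enable the core/iface/bus clocks, request the BAM DMA channels,
 * check the hardware version, then initialize the queue, the done
 * tasklet and the callbacks before registering the algorithms.
 */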
static int qce_crypto_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct qce_device *qce;
        int ret;

        qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
        if (!qce)
                return -ENOMEM;

        qce->dev = dev;
        platform_set_drvdata(pdev, qce);

        qce->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(qce->base))
                return PTR_ERR(qce->base);

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret < 0)
                return ret;

        qce->core = devm_clk_get(qce->dev, "core");
        if (IS_ERR(qce->core))
                return PTR_ERR(qce->core);

        qce->iface = devm_clk_get(qce->dev, "iface");
        if (IS_ERR(qce->iface))
                return PTR_ERR(qce->iface);

        qce->bus = devm_clk_get(qce->dev, "bus");
        if (IS_ERR(qce->bus))
                return PTR_ERR(qce->bus);

        ret = clk_prepare_enable(qce->core);
        if (ret)
                return ret;

        ret = clk_prepare_enable(qce->iface);
        if (ret)
                goto err_clks_core;

        ret = clk_prepare_enable(qce->bus);
        if (ret)
                goto err_clks_iface;

        ret = qce_dma_request(qce->dev, &qce->dma);
        if (ret)
                goto err_clks;

        ret = qce_check_version(qce);
        if (ret)
                goto err_dma;

        spin_lock_init(&qce->lock);
        tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
                     (unsigned long)qce);
        crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

        qce->async_req_enqueue = qce_async_request_enqueue;
        qce->async_req_done = qce_async_request_done;

        ret = qce_register_algs(qce);
        if (ret)
                goto err_dma;

        return 0;

err_dma:
        qce_dma_release(&qce->dma);
err_clks:
        clk_disable_unprepare(qce->bus);
err_clks_iface:
        clk_disable_unprepare(qce->iface);
err_clks_core:
        clk_disable_unprepare(qce->core);
        return ret;
}

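/*
 * Teardown mirrors probe in reverse order; the done tasklet is killed
 * first so no completion can run while the algorithms go away.
 */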
static int qce_crypto_remove(struct platform_device *pdev)
{
        struct qce_device *qce = platform_get_drvdata(pdev);

        tasklet_kill(&qce->done_tasklet);
        qce_unregister_algs(qce);
        qce_dma_release(&qce->dma);
        clk_disable_unprepare(qce->bus);
        clk_disable_unprepare(qce->iface);
        clk_disable_unprepare(qce->core);
        return 0;
}

static const struct of_device_id qce_crypto_of_match[] = {
        { .compatible = "qcom,crypto-v5.1", },
        {}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
        .probe = qce_crypto_probe,
        .remove = qce_crypto_remove,
        .driver = {
                .name = KBUILD_MODNAME,
                .of_match_table = qce_crypto_of_match,
        },
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");
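
/*
 * A device tree node along these lines binds this driver (a sketch
 * based on the qcom,crypto-v5.1 binding; the unit address, clock
 * specifiers and BAM pipe numbers are board specific):
 *
 *      crypto@fd45a000 {
 *              compatible = "qcom,crypto-v5.1";
 *              reg = <0xfd45a000 0x6000>;
 *              clocks = <&gcc GCC_CE2_AHB_CLK>, <&gcc GCC_CE2_AXI_CLK>,
 *                       <&gcc GCC_CE2_CLK>;
 *              clock-names = "iface", "bus", "core";
 *              dmas = <&cryptobam 2>, <&cryptobam 3>;
 *              dma-names = "rx", "tx";
 *      };
 */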
