root/crypto/crypto_engine.c


DEFINITIONS

This source file includes the following definitions:
  1. crypto_finalize_request
  2. crypto_pump_requests
  3. crypto_pump_work
  4. crypto_transfer_request
  5. crypto_transfer_request_to_engine
  6. crypto_transfer_ablkcipher_request_to_engine
  7. crypto_transfer_aead_request_to_engine
  8. crypto_transfer_akcipher_request_to_engine
  9. crypto_transfer_hash_request_to_engine
  10. crypto_transfer_skcipher_request_to_engine
  11. crypto_finalize_ablkcipher_request
  12. crypto_finalize_aead_request
  13. crypto_finalize_akcipher_request
  14. crypto_finalize_hash_request
  15. crypto_finalize_skcipher_request
  16. crypto_engine_start
  17. crypto_engine_stop
  18. crypto_engine_alloc_init
  19. crypto_engine_exit

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * Handle async block requests by crypto hardware engine.
   4  *
   5  * Copyright (C) 2016 Linaro, Inc.
   6  *
   7  * Author: Baolin Wang <baolin.wang@linaro.org>
   8  */
   9 
  10 #include <linux/err.h>
  11 #include <linux/delay.h>
  12 #include <crypto/engine.h>
  13 #include <uapi/linux/sched/types.h>
  14 #include "internal.h"
  15 
  16 #define CRYPTO_ENGINE_MAX_QLEN 10
  17 
  18 /**
  19  * crypto_finalize_request - finalize one request if the request is done
  20  * @engine: the hardware engine
  21  * @req: the request that needs to be finalized
  22  * @err: error number
  23  */
  24 static void crypto_finalize_request(struct crypto_engine *engine,
  25                              struct crypto_async_request *req, int err)
  26 {
  27         unsigned long flags;
  28         bool finalize_cur_req = false;
  29         int ret;
  30         struct crypto_engine_ctx *enginectx;
  31 
  32         spin_lock_irqsave(&engine->queue_lock, flags);
  33         if (engine->cur_req == req)
  34                 finalize_cur_req = true;
  35         spin_unlock_irqrestore(&engine->queue_lock, flags);
  36 
  37         if (finalize_cur_req) {
  38                 enginectx = crypto_tfm_ctx(req->tfm);
  39                 if (engine->cur_req_prepared &&
  40                     enginectx->op.unprepare_request) {
  41                         ret = enginectx->op.unprepare_request(engine, req);
  42                         if (ret)
  43                                 dev_err(engine->dev, "failed to unprepare request\n");
  44                 }
  45                 spin_lock_irqsave(&engine->queue_lock, flags);
  46                 engine->cur_req = NULL;
  47                 engine->cur_req_prepared = false;
  48                 spin_unlock_irqrestore(&engine->queue_lock, flags);
  49         }
  50 
  51         req->complete(req, err);
  52 
  53         kthread_queue_work(engine->kworker, &engine->pump_requests);
  54 }
  55 
  56 /**
  57  * crypto_pump_requests - dequeue one request from engine queue to process
  58  * @engine: the hardware engine
  59  * @in_kthread: true if we are in the context of the request pump thread
  60  *
  61  * This function checks if there is any request in the engine queue that
  62  * needs processing and, if so, calls out to the driver to initialize hardware
  63  * and handle each request.
  64  */
  65 static void crypto_pump_requests(struct crypto_engine *engine,
  66                                  bool in_kthread)
  67 {
  68         struct crypto_async_request *async_req, *backlog;
  69         unsigned long flags;
  70         bool was_busy = false;
  71         int ret;
  72         struct crypto_engine_ctx *enginectx;
  73 
  74         spin_lock_irqsave(&engine->queue_lock, flags);
  75 
  76         /* Make sure we are not already running a request */
  77         if (engine->cur_req)
  78                 goto out;
  79 
  80         /* If another context is idling then defer */
  81         if (engine->idling) {
  82                 kthread_queue_work(engine->kworker, &engine->pump_requests);
  83                 goto out;
  84         }
  85 
  86         /* Check if the engine queue is empty or the engine is not running */
  87         if (!crypto_queue_len(&engine->queue) || !engine->running) {
  88                 if (!engine->busy)
  89                         goto out;
  90 
  91                 /* Only do teardown in the thread */
  92                 if (!in_kthread) {
  93                         kthread_queue_work(engine->kworker,
  94                                            &engine->pump_requests);
  95                         goto out;
  96                 }
  97 
  98                 engine->busy = false;
  99                 engine->idling = true;
 100                 spin_unlock_irqrestore(&engine->queue_lock, flags);
 101 
 102                 if (engine->unprepare_crypt_hardware &&
 103                     engine->unprepare_crypt_hardware(engine))
 104                         dev_err(engine->dev, "failed to unprepare crypt hardware\n");
 105 
 106                 spin_lock_irqsave(&engine->queue_lock, flags);
 107                 engine->idling = false;
 108                 goto out;
 109         }
 110 
 111         /* Get the first request from the engine queue to handle */
 112         backlog = crypto_get_backlog(&engine->queue);
 113         async_req = crypto_dequeue_request(&engine->queue);
 114         if (!async_req)
 115                 goto out;
 116 
 117         engine->cur_req = async_req;
 118         if (backlog)
 119                 backlog->complete(backlog, -EINPROGRESS);
 120 
 121         if (engine->busy)
 122                 was_busy = true;
 123         else
 124                 engine->busy = true;
 125 
 126         spin_unlock_irqrestore(&engine->queue_lock, flags);
 127 
 128         /* At this point we have successfully dequeued a request to process */
 129         if (!was_busy && engine->prepare_crypt_hardware) {
 130                 ret = engine->prepare_crypt_hardware(engine);
 131                 if (ret) {
 132                         dev_err(engine->dev, "failed to prepare crypt hardware\n");
 133                         goto req_err;
 134                 }
 135         }
 136 
 137         enginectx = crypto_tfm_ctx(async_req->tfm);
 138 
 139         if (enginectx->op.prepare_request) {
 140                 ret = enginectx->op.prepare_request(engine, async_req);
 141                 if (ret) {
 142                         dev_err(engine->dev, "failed to prepare request: %d\n",
 143                                 ret);
 144                         goto req_err;
 145                 }
 146                 engine->cur_req_prepared = true;
 147         }
 148         if (!enginectx->op.do_one_request) {
 149                 dev_err(engine->dev, "failed to do request\n");
 150                 ret = -EINVAL;
 151                 goto req_err;
 152         }
 153         ret = enginectx->op.do_one_request(engine, async_req);
 154         if (ret) {
 155                 dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
 156                 goto req_err;
 157         }
 158         return;
 159 
 160 req_err:
 161         crypto_finalize_request(engine, async_req, ret);
 162         return;
 163 
 164 out:
 165         spin_unlock_irqrestore(&engine->queue_lock, flags);
 166 }
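
/*
 * Illustrative sketch, not part of the original file: a driver typically
 * embeds struct crypto_engine_ctx as the *first* member of its transform
 * context, because crypto_pump_requests() fetches the ops directly from
 * crypto_tfm_ctx(). All my_drv_* names below are hypothetical.
 */
#if 0	/* example only */
struct my_drv_tfm_ctx {
        struct crypto_engine_ctx enginectx;     /* must be the first member */
        struct my_drv_device *dd;               /* driver device holding the engine */
};

static int my_drv_do_one_request(struct crypto_engine *engine, void *areq)
{
        struct skcipher_request *req =
                container_of(areq, struct skcipher_request, base);

        /*
         * Program the hardware and return; the request is completed later
         * from the IRQ path via crypto_finalize_skcipher_request().
         */
        return my_drv_hw_start(req);            /* hypothetical helper */
}

static int my_drv_init_tfm(struct crypto_skcipher *tfm)
{
        struct my_drv_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->enginectx.op.prepare_request = NULL;       /* optional hook */
        ctx->enginectx.op.unprepare_request = NULL;     /* optional hook */
        ctx->enginectx.op.do_one_request = my_drv_do_one_request;
        return 0;
}
#endif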
 167 
 168 static void crypto_pump_work(struct kthread_work *work)
 169 {
 170         struct crypto_engine *engine =
 171                 container_of(work, struct crypto_engine, pump_requests);
 172 
 173         crypto_pump_requests(engine, true);
 174 }
 175 
 176 /**
 177  * crypto_transfer_request - transfer the new request into the engine queue
 178  * @engine: the hardware engine
 179  * @req: the request to be enqueued into the engine queue
 180  */
 181 static int crypto_transfer_request(struct crypto_engine *engine,
 182                                    struct crypto_async_request *req,
 183                                    bool need_pump)
 184 {
 185         unsigned long flags;
 186         int ret;
 187 
 188         spin_lock_irqsave(&engine->queue_lock, flags);
 189 
 190         if (!engine->running) {
 191                 spin_unlock_irqrestore(&engine->queue_lock, flags);
 192                 return -ESHUTDOWN;
 193         }
 194 
 195         ret = crypto_enqueue_request(&engine->queue, req);
 196 
 197         if (!engine->busy && need_pump)
 198                 kthread_queue_work(engine->kworker, &engine->pump_requests);
 199 
 200         spin_unlock_irqrestore(&engine->queue_lock, flags);
 201         return ret;
 202 }
 203 
 204 /**
 205  * crypto_transfer_request_to_engine - transfer one request into the
 206  * engine queue
 207  * @engine: the hardware engine
 208  * @req: the request to be enqueued into the engine queue
 209  */
 210 static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
 211                                              struct crypto_async_request *req)
 212 {
 213         return crypto_transfer_request(engine, req, true);
 214 }
 215 
 216 /**
 217  * crypto_transfer_ablkcipher_request_to_engine - transfer one ablkcipher_request
 218  * into the engine queue
 219  * @engine: the hardware engine
 220  * @req: the request to be enqueued into the engine queue
 221  * TODO: Remove this function when skcipher conversion is finished
 222  */
 223 int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
 224                                                  struct ablkcipher_request *req)
 225 {
 226         return crypto_transfer_request_to_engine(engine, &req->base);
 227 }
 228 EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);
 229 
 230 /**
 231  * crypto_transfer_aead_request_to_engine - transfer one aead_request
 232  * into the engine queue
 233  * @engine: the hardware engine
 234  * @req: the request to be enqueued into the engine queue
 235  */
 236 int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
 237                                            struct aead_request *req)
 238 {
 239         return crypto_transfer_request_to_engine(engine, &req->base);
 240 }
 241 EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
 242 
 243 /**
 244  * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 245  * into the engine queue
 246  * @engine: the hardware engine
 247  * @req: the request to be enqueued into the engine queue
 248  */
 249 int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
 250                                                struct akcipher_request *req)
 251 {
 252         return crypto_transfer_request_to_engine(engine, &req->base);
 253 }
 254 EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
 255 
 256 /**
 257  * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 258  * into the engine queue
 259  * @engine: the hardware engine
 260  * @req: the request to be enqueued into the engine queue
 261  */
 262 int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
 263                                            struct ahash_request *req)
 264 {
 265         return crypto_transfer_request_to_engine(engine, &req->base);
 266 }
 267 EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
 268 
 269 /**
 270  * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 271  * into the engine queue
 272  * @engine: the hardware engine
 273  * @req: the request to be enqueued into the engine queue
 274  */
 275 int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
 276                                                struct skcipher_request *req)
 277 {
 278         return crypto_transfer_request_to_engine(engine, &req->base);
 279 }
 280 EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
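
/*
 * Illustrative sketch, not part of the original file: a driver's
 * .encrypt/.decrypt callback usually just hands the request to the engine
 * and returns the transfer result (typically -EINPROGRESS, or -EBUSY for a
 * backlogged request). The my_drv_* names are hypothetical and match the
 * earlier sketch.
 */
#if 0	/* example only */
static int my_drv_skcipher_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct my_drv_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

        /* queue the request; the engine pump will call do_one_request() */
        return crypto_transfer_skcipher_request_to_engine(ctx->dd->engine, req);
}
#endif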
 281 
 282 /**
 283  * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
 284  * the request is done
 285  * @engine: the hardware engine
 286  * @req: the request that needs to be finalized
 287  * @err: error number
 288  * TODO: Remove this function when skcipher conversion is finished
 289  */
 290 void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
 291                                         struct ablkcipher_request *req, int err)
 292 {
 293         return crypto_finalize_request(engine, &req->base, err);
 294 }
 295 EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);
 296 
 297 /**
 298  * crypto_finalize_aead_request - finalize one aead_request if
 299  * the request is done
 300  * @engine: the hardware engine
 301  * @req: the request that needs to be finalized
 302  * @err: error number
 303  */
 304 void crypto_finalize_aead_request(struct crypto_engine *engine,
 305                                   struct aead_request *req, int err)
 306 {
 307         return crypto_finalize_request(engine, &req->base, err);
 308 }
 309 EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
 310 
 311 /**
 312  * crypto_finalize_akcipher_request - finalize one akcipher_request if
 313  * the request is done
 314  * @engine: the hardware engine
 315  * @req: the request that needs to be finalized
 316  * @err: error number
 317  */
 318 void crypto_finalize_akcipher_request(struct crypto_engine *engine,
 319                                       struct akcipher_request *req, int err)
 320 {
 321         return crypto_finalize_request(engine, &req->base, err);
 322 }
 323 EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
 324 
 325 /**
 326  * crypto_finalize_hash_request - finalize one ahash_request if
 327  * the request is done
 328  * @engine: the hardware engine
 329  * @req: the request that needs to be finalized
 330  * @err: error number
 331  */
 332 void crypto_finalize_hash_request(struct crypto_engine *engine,
 333                                   struct ahash_request *req, int err)
 334 {
 335         return crypto_finalize_request(engine, &req->base, err);
 336 }
 337 EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
 338 
 339 /**
 340  * crypto_finalize_skcipher_request - finalize one skcipher_request if
 341  * the request is done
 342  * @engine: the hardware engine
 343  * @req: the request that needs to be finalized
 344  * @err: error number
 345  */
 346 void crypto_finalize_skcipher_request(struct crypto_engine *engine,
 347                                       struct skcipher_request *req, int err)
 348 {
 349         return crypto_finalize_request(engine, &req->base, err);
 350 }
 351 EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
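
/*
 * Illustrative sketch, not part of the original file: once the hardware
 * signals completion (IRQ handler, tasklet, DMA callback, ...), the driver
 * finalizes the request; this completes it and re-queues the pump so the
 * next queued request gets processed. The my_drv_* names are hypothetical.
 */
#if 0	/* example only */
static void my_drv_done(struct my_drv_device *dd, bool hw_error)
{
        /* dd->cur_req is assumed to have been saved in do_one_request() */
        crypto_finalize_skcipher_request(dd->engine, dd->cur_req,
                                         hw_error ? -EIO : 0);
}
#endif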
 352 
 353 /**
 354  * crypto_engine_start - start the hardware engine
 355  * @engine: the hardware engine that needs to be started
 356  *
 357  * Return 0 on success, else a negative error code on failure.
 358  */
 359 int crypto_engine_start(struct crypto_engine *engine)
 360 {
 361         unsigned long flags;
 362 
 363         spin_lock_irqsave(&engine->queue_lock, flags);
 364 
 365         if (engine->running || engine->busy) {
 366                 spin_unlock_irqrestore(&engine->queue_lock, flags);
 367                 return -EBUSY;
 368         }
 369 
 370         engine->running = true;
 371         spin_unlock_irqrestore(&engine->queue_lock, flags);
 372 
 373         kthread_queue_work(engine->kworker, &engine->pump_requests);
 374 
 375         return 0;
 376 }
 377 EXPORT_SYMBOL_GPL(crypto_engine_start);
 378 
 379 /**
 380  * crypto_engine_stop - stop the hardware engine
 381  * @engine: the hardware engine that needs to be stopped
 382  *
 383  * Return 0 on success, else a negative error code on failure.
 384  */
 385 int crypto_engine_stop(struct crypto_engine *engine)
 386 {
 387         unsigned long flags;
 388         unsigned int limit = 500;
 389         int ret = 0;
 390 
 391         spin_lock_irqsave(&engine->queue_lock, flags);
 392 
 393         /*
 394          * If the engine queue is not empty or the engine is busy, wait
 395          * for a while so that the queued requests can be pumped out.
 396          */
 397         while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
 398                 spin_unlock_irqrestore(&engine->queue_lock, flags);
 399                 msleep(20);
 400                 spin_lock_irqsave(&engine->queue_lock, flags);
 401         }
 402 
 403         if (crypto_queue_len(&engine->queue) || engine->busy)
 404                 ret = -EBUSY;
 405         else
 406                 engine->running = false;
 407 
 408         spin_unlock_irqrestore(&engine->queue_lock, flags);
 409 
 410         if (ret)
 411                 dev_warn(engine->dev, "could not stop engine\n");
 412 
 413         return ret;
 414 }
 415 EXPORT_SYMBOL_GPL(crypto_engine_stop);
 416 
 417 /**
 418  * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 419  * initialize it.
 420  * @dev: the device attached to the hardware engine
 421  * @rt: whether this queue is set to run as a realtime task
 422  *
 423  * This must be called from context that can sleep.
 424  * Return: the crypto engine structure on success, else NULL.
 425  */
 426 struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 427 {
 428         struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
 429         struct crypto_engine *engine;
 430 
 431         if (!dev)
 432                 return NULL;
 433 
 434         engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
 435         if (!engine)
 436                 return NULL;
 437 
 438         engine->dev = dev;
 439         engine->rt = rt;
 440         engine->running = false;
 441         engine->busy = false;
 442         engine->idling = false;
 443         engine->cur_req_prepared = false;
 444         engine->priv_data = dev;
 445         snprintf(engine->name, sizeof(engine->name),
 446                  "%s-engine", dev_name(dev));
 447 
 448         crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
 449         spin_lock_init(&engine->queue_lock);
 450 
 451         engine->kworker = kthread_create_worker(0, "%s", engine->name);
 452         if (IS_ERR(engine->kworker)) {
 453                 dev_err(dev, "failed to create crypto request pump task\n");
 454                 return NULL;
 455         }
 456         kthread_init_work(&engine->pump_requests, crypto_pump_work);
 457 
 458         if (engine->rt) {
 459                 dev_info(dev, "will run requests pump with realtime priority\n");
 460                 sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
 461         }
 462 
 463         return engine;
 464 }
 465 EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
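
/*
 * Illustrative sketch, not part of the original file: a typical probe()
 * allocates and starts the engine before registering its algorithms. The
 * prepare/unprepare_crypt_hardware hooks are optional (e.g. for powering
 * the block up and down around request bursts). All my_drv_* names are
 * hypothetical.
 */
#if 0	/* example only */
static int my_drv_probe(struct platform_device *pdev)
{
        struct my_drv_device *dd;
        int ret;

        dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
        if (!dd)
                return -ENOMEM;

        dd->engine = crypto_engine_alloc_init(&pdev->dev, true /* rt pump */);
        if (!dd->engine)
                return -ENOMEM;

        dd->engine->prepare_crypt_hardware = my_drv_prepare_hw;        /* optional */
        dd->engine->unprepare_crypt_hardware = my_drv_unprepare_hw;    /* optional */

        ret = crypto_engine_start(dd->engine);
        if (ret) {
                crypto_engine_exit(dd->engine);
                return ret;
        }

        platform_set_drvdata(pdev, dd);
        /* register skcipher/aead/ahash algorithms here */
        return 0;
}
#endif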
 466 
 467 /**
 468  * crypto_engine_exit - free the resources of the hardware engine on exit
 469  * @engine: the hardware engine that needs to be freed
 470  *
 471  * Return 0 on success, else a negative error code on failure.
 472  */
 473 int crypto_engine_exit(struct crypto_engine *engine)
 474 {
 475         int ret;
 476 
 477         ret = crypto_engine_stop(engine);
 478         if (ret)
 479                 return ret;
 480 
 481         kthread_destroy_worker(engine->kworker);
 482 
 483         return 0;
 484 }
 485 EXPORT_SYMBOL_GPL(crypto_engine_exit);
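
/*
 * Illustrative sketch, not part of the original file: the matching remove()
 * path unregisters its algorithms first so that no new requests arrive, then
 * tears the engine down with crypto_engine_exit(). The my_drv_* names are
 * hypothetical.
 */
#if 0	/* example only */
static int my_drv_remove(struct platform_device *pdev)
{
        struct my_drv_device *dd = platform_get_drvdata(pdev);

        /* unregister skcipher/aead/ahash algorithms here */
        crypto_engine_exit(dd->engine);
        return 0;
}
#endif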
 486 
 487 MODULE_LICENSE("GPL");
 488 MODULE_DESCRIPTION("Crypto hardware engine framework");
