This source file includes the following definitions:
- crypto_finalize_request
- crypto_pump_requests
- crypto_pump_work
- crypto_transfer_request
- crypto_transfer_request_to_engine
- crypto_transfer_ablkcipher_request_to_engine
- crypto_transfer_aead_request_to_engine
- crypto_transfer_akcipher_request_to_engine
- crypto_transfer_hash_request_to_engine
- crypto_transfer_skcipher_request_to_engine
- crypto_finalize_ablkcipher_request
- crypto_finalize_aead_request
- crypto_finalize_akcipher_request
- crypto_finalize_hash_request
- crypto_finalize_skcipher_request
- crypto_engine_start
- crypto_engine_stop
- crypto_engine_alloc_init
- crypto_engine_exit
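
/*
 * Crypto hardware engine framework: handles a queue of crypto requests
 * and a kthread "pump" that feeds them to hardware driver callbacks.
 */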
#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10
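
/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number to complete the request with
 */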
static void crypto_finalize_request(struct crypto_engine *engine,
                                    struct crypto_async_request *req, int err)
{
        unsigned long flags;
        bool finalize_cur_req = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        spin_lock_irqsave(&engine->queue_lock, flags);
        if (engine->cur_req == req)
                finalize_cur_req = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (finalize_cur_req) {
                enginectx = crypto_tfm_ctx(req->tfm);
                if (engine->cur_req_prepared &&
                    enginectx->op.unprepare_request) {
                        ret = enginectx->op.unprepare_request(engine, req);
                        if (ret)
                                dev_err(engine->dev, "failed to unprepare request\n");
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->cur_req = NULL;
                engine->cur_req_prepared = false;
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        req->complete(req, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}
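
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */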
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        bool was_busy = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        dev_err(engine->dev, "failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        engine->cur_req = async_req;
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /* Prepare the hardware only on the idle-to-busy transition */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare crypt hardware\n");
                        goto req_err;
                }
        }

        enginectx = crypto_tfm_ctx(async_req->tfm);

        if (enginectx->op.prepare_request) {
                ret = enginectx->op.prepare_request(engine, async_req);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare request: %d\n",
                                ret);
                        goto req_err;
                }
                engine->cur_req_prepared = true;
        }
        if (!enginectx->op.do_one_request) {
                dev_err(engine->dev, "failed to do request\n");
                ret = -EINVAL;
                goto req_err;
        }
        ret = enginectx->op.do_one_request(engine, async_req);
        if (ret) {
                dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
                goto req_err;
        }
        return;

req_err:
        crypto_finalize_request(engine, async_req, ret);
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}
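
/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued
 * @need_pump: if true, kick the request pump after queueing
 */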
static int crypto_transfer_request(struct crypto_engine *engine,
                                   struct crypto_async_request *req,
                                   bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = crypto_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}
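
/**
 * crypto_transfer_request_to_engine - transfer one request into the engine
 * queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request to be queued
 */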
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                             struct crypto_async_request *req)
{
        return crypto_transfer_request(engine, req, true);
}
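
/**
 * crypto_transfer_ablkcipher_request_to_engine - transfer one
 * ablkcipher_request to the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued
 */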
int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
                                                 struct ablkcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);
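
/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued
 */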
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
                                           struct aead_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
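
/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued
 */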
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
                                               struct akcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
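
/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued
 */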
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
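
/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued
 */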
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
                                               struct skcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
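
/**
 * crypto_finalize_ablkcipher_request - signal that an ablkcipher request
 * is complete
 * @engine: the hardware engine
 * @req: the request that is done
 * @err: error number to complete the request with
 */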
void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
                                        struct ablkcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);
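
/**
 * crypto_finalize_aead_request - signal that an aead request is complete
 * @engine: the hardware engine
 * @req: the request that is done
 * @err: error number to complete the request with
 */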
void crypto_finalize_aead_request(struct crypto_engine *engine,
                                  struct aead_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
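
/**
 * crypto_finalize_akcipher_request - signal that an akcipher request
 * is complete
 * @engine: the hardware engine
 * @req: the request that is done
 * @err: error number to complete the request with
 */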
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
                                      struct akcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
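
/**
 * crypto_finalize_hash_request - signal that a hash request is complete
 * @engine: the hardware engine
 * @req: the request that is done
 * @err: error number to complete the request with
 */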
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
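
/**
 * crypto_finalize_skcipher_request - signal that an skcipher request
 * is complete
 * @engine: the hardware engine
 * @req: the request that is done
 * @err: error number to complete the request with
 */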
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
                                      struct skcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
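
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine to be started
 *
 * Return: 0 on success, -EBUSY if the engine is already running.
 */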
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        kthread_queue_work(engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);
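
/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine to be stopped
 *
 * Return: 0 on success, -EBUSY if the queue could not be drained.
 */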
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned int limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * Wait up to 500 * 20ms (~10 seconds) for the engine queue to
         * drain and the engine to become idle.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                dev_warn(engine->dev, "could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
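
/**
 * crypto_engine_alloc_init - allocate a crypto engine structure and
 * initialize it
 * @dev: the device attached to the hardware engine
 * @rt: whether the request pump should run as a realtime task
 *
 * Must be called from a context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */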
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->dev = dev;
        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->cur_req_prepared = false;
        engine->priv_data = dev;
        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
        spin_lock_init(&engine->queue_lock);

        engine->kworker = kthread_create_worker(0, "%s", engine->name);
        if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        kthread_init_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
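
/**
 * crypto_engine_exit - stop the engine and free its resources
 * @engine: the hardware engine to be freed
 *
 * Return: 0 on success, else the error from crypto_engine_stop().
 */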
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        kthread_destroy_worker(engine->kworker);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");