This source file includes the following definitions:
- virtio_crypto_alg_sg_nents_length
- virtio_crypto_alg_validate_key
- virtio_crypto_alg_ablkcipher_init_session
- virtio_crypto_alg_ablkcipher_close_session
- virtio_crypto_alg_ablkcipher_init_sessions
- virtio_crypto_ablkcipher_setkey
- __virtio_crypto_ablkcipher_do_req
- virtio_crypto_ablkcipher_encrypt
- virtio_crypto_ablkcipher_decrypt
- virtio_crypto_ablkcipher_init
- virtio_crypto_ablkcipher_exit
- virtio_crypto_ablkcipher_crypt_req
- virtio_crypto_ablkcipher_finalize_req
- virtio_crypto_algs_register
- virtio_crypto_algs_unregister
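
/* Algorithms supported by virtio crypto device */
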
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>
#include <crypto/aes.h>  /* AES_KEYSIZE_*, AES_BLOCK_SIZE, AES_{MIN,MAX}_KEY_SIZE */

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

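/* Per-tfm context: the device in use and one session id per direction. */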
struct virtio_crypto_ablkcipher_ctx {
        struct crypto_engine_ctx enginectx;
        struct virtio_crypto *vcrypto;
        struct crypto_tfm *tfm;

        struct virtio_crypto_sym_session_info enc_sess_info;
        struct virtio_crypto_sym_session_info dec_sess_info;
};

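/* Per-request context, stored in the ablkcipher request's ctx area. */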
struct virtio_crypto_sym_request {
        struct virtio_crypto_request base;

        /* Cipher or aead */
        uint32_t type;
        struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
        struct ablkcipher_request *ablkcipher_req;
        uint8_t *iv;

        /* Encryption? */
        bool encrypt;
};

struct virtio_crypto_algo {
        uint32_t algonum;
        uint32_t service;
        unsigned int active_devs;
        struct crypto_alg algo;
};

/*
 * algs_lock protects the virtio_crypto_algs[] table below: algorithm
 * registration/unregistration and the per-algorithm active_devs counts.
 */
static DEFINE_MUTEX(algs_lock);

static void virtio_crypto_ablkcipher_finalize_req(
        struct virtio_crypto_sym_request *vc_sym_req,
        struct ablkcipher_request *req,
        int err);

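/*
 * Data-virtqueue completion callback: translate the device status of a
 * finished symmetric request into an errno and finalize the request.
 */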
static void virtio_crypto_dataq_sym_callback(
                struct virtio_crypto_request *vc_req, int len)
{
        struct virtio_crypto_sym_request *vc_sym_req =
                container_of(vc_req, struct virtio_crypto_sym_request, base);
        struct ablkcipher_request *ablk_req;
        int error;

        /* Finish the encrypt or decrypt process */
        if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
                switch (vc_req->status) {
                case VIRTIO_CRYPTO_OK:
                        error = 0;
                        break;
                case VIRTIO_CRYPTO_INVSESS:
                case VIRTIO_CRYPTO_ERR:
                        error = -EINVAL;
                        break;
                case VIRTIO_CRYPTO_BADMSG:
                        error = -EBADMSG;
                        break;
                default:
                        error = -EIO;
                        break;
                }
                ablk_req = vc_sym_req->ablkcipher_req;
                virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
                                                      ablk_req, error);
        }
}

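/* Sum of the byte lengths of all entries in a scatterlist chain. */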
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
        u64 total = 0;

        for (; sg; sg = sg_next(sg))
                total += sg->length;

        return total;
}

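/* Validate the key size and pick the virtio algorithm; only AES-CBC here. */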
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
        switch (key_len) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
        case AES_KEYSIZE_256:
                *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

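/*
 * Create one cipher session (encrypt or decrypt) on the device via the
 * control virtqueue and record the session id returned by the host.
 */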
static int virtio_crypto_alg_ablkcipher_init_session(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                uint32_t alg, const uint8_t *key,
                unsigned int keylen,
                int encrypt)
{
        struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
        unsigned int tmp;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
        int err;
        unsigned int num_out = 0, num_in = 0;

        /*
         * Avoid DMA from the stack: the key must live in
         * dynamically-allocated memory while the device reads it.
         */
        uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

        if (!cipher_key)
                return -ENOMEM;

        spin_lock(&vcrypto->ctrl_lock);
        /* Pad ctrl header */
        vcrypto->ctrl.header.opcode =
                cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
        vcrypto->ctrl.header.algo = cpu_to_le32(alg);
        /* Set the default dataqueue id to 0 */
        vcrypto->ctrl.header.queue_id = 0;

        vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
        /* Pad cipher's parameters */
        vcrypto->ctrl.u.sym_create_session.op_type =
                cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
                vcrypto->ctrl.header.algo;
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
                cpu_to_le32(keylen);
        vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
                cpu_to_le32(op);

        sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
        sgs[num_out++] = &outhdr;

        /* Set key */
        sg_init_one(&key_sg, cipher_key, keylen);
        sgs[num_out++] = &key_sg;

        /* Return status and session id back to host */
        sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
        sgs[num_out + num_in++] = &inhdr;

        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
                                num_in, vcrypto, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock(&vcrypto->ctrl_lock);
                kzfree(cipher_key);
                return err;
        }
        virtqueue_kick(vcrypto->ctrl_vq);

        /*
         * Trapping into the hypervisor, so the request should be
         * handled immediately.
         */
        while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
               !virtqueue_is_broken(vcrypto->ctrl_vq))
                cpu_relax();

        if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
                spin_unlock(&vcrypto->ctrl_lock);
                pr_err("virtio_crypto: Create session failed status: %u\n",
                        le32_to_cpu(vcrypto->input.status));
                kzfree(cipher_key);
                return -EINVAL;
        }

        if (encrypt)
                ctx->enc_sess_info.session_id =
                        le64_to_cpu(vcrypto->input.session_id);
        else
                ctx->dec_sess_info.session_id =
                        le64_to_cpu(vcrypto->input.session_id);

        spin_unlock(&vcrypto->ctrl_lock);

        kzfree(cipher_key);
        return 0;
}

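/* Destroy the encrypt or decrypt session via the control virtqueue. */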
static int virtio_crypto_alg_ablkcipher_close_session(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                int encrypt)
{
        struct scatterlist outhdr, status_sg, *sgs[2];
        unsigned int tmp;
        struct virtio_crypto_destroy_session_req *destroy_session;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int err;
        unsigned int num_out = 0, num_in = 0;

        spin_lock(&vcrypto->ctrl_lock);
        vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
        /* Pad ctrl header */
        vcrypto->ctrl.header.opcode =
                cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
        /* Set the default virtqueue id to 0 */
        vcrypto->ctrl.header.queue_id = 0;

        destroy_session = &vcrypto->ctrl.u.destroy_session;

        if (encrypt)
                destroy_session->session_id =
                        cpu_to_le64(ctx->enc_sess_info.session_id);
        else
                destroy_session->session_id =
                        cpu_to_le64(ctx->dec_sess_info.session_id);

        sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
        sgs[num_out++] = &outhdr;

        /* Return status to the guest */
        sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
                sizeof(vcrypto->ctrl_status.status));
        sgs[num_out + num_in++] = &status_sg;

        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
                        num_in, vcrypto, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock(&vcrypto->ctrl_lock);
                return err;
        }
        virtqueue_kick(vcrypto->ctrl_vq);

        while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
               !virtqueue_is_broken(vcrypto->ctrl_vq))
                cpu_relax();

        if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
                spin_unlock(&vcrypto->ctrl_lock);
                pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
                        vcrypto->ctrl_status.status,
                        le64_to_cpu(destroy_session->session_id));
                return -EINVAL;
        }
        spin_unlock(&vcrypto->ctrl_lock);

        return 0;
}

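/* Create the encrypt and decrypt sessions for the given key. */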
static int virtio_crypto_alg_ablkcipher_init_sessions(
                struct virtio_crypto_ablkcipher_ctx *ctx,
                const uint8_t *key, unsigned int keylen)
{
        uint32_t alg;
        int ret;
        struct virtio_crypto *vcrypto = ctx->vcrypto;

        if (keylen > vcrypto->max_cipher_key_len) {
                pr_err("virtio_crypto: the key is too long\n");
                goto bad_key;
        }

        if (virtio_crypto_alg_validate_key(keylen, &alg))
                goto bad_key;

        /* Create encryption session */
        ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
                        alg, key, keylen, 1);
        if (ret)
                return ret;
        /* Create decryption session */
        ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
                        alg, key, keylen, 0);
        if (ret) {
                virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
                return ret;
        }
        return 0;

bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

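/* Note: kernel crypto API realm */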
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                           const uint8_t *key,
                                           unsigned int keylen)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        uint32_t alg;
        int ret;

        ret = virtio_crypto_alg_validate_key(keylen, &alg);
        if (ret)
                return ret;

        if (!ctx->vcrypto) {
                /* New key */
                int node = virtio_crypto_get_current_node();
                struct virtio_crypto *vcrypto =
                                virtcrypto_get_dev_node(node,
                                VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
                if (!vcrypto) {
                        pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
                        return -ENODEV;
                }

                ctx->vcrypto = vcrypto;
        } else {
                /* Rekeying: close the sessions created for the old key */
                virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
                virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
        }

        ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
        if (ret) {
                virtcrypto_dev_put(ctx->vcrypto);
                ctx->vcrypto = NULL;

                return ret;
        }

        return 0;
}

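/*
 * Build and queue the virtio request for one cipher operation:
 * out header, IV, source data, destination data, and status byte.
 */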
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
                struct ablkcipher_request *req,
                struct data_queue *data_vq)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        struct virtio_crypto_op_data_req *req_data;
        int src_nents, dst_nents;
        int err;
        unsigned long flags;
        struct scatterlist outhdr, iv_sg, status_sg, **sgs;
        u64 dst_len;
        unsigned int num_out = 0, num_in = 0;
        int sg_total;
        uint8_t *iv;
        struct scatterlist *sg;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                pr_err("Invalid number of src SG.\n");
                return src_nents;
        }

        dst_nents = sg_nents(req->dst);

        pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
                        src_nents, dst_nents);

        /* Why 3?  outhdr + iv + inhdr */
        sg_total = src_nents + dst_nents + 3;
        sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!sgs)
                return -ENOMEM;

        req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!req_data) {
                kfree(sgs);
                return -ENOMEM;
        }

        vc_req->req_data = req_data;
        vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
        /* Head of operation */
        if (vc_sym_req->encrypt) {
                req_data->header.session_id =
                        cpu_to_le64(ctx->enc_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
        } else {
                req_data->header.session_id =
                        cpu_to_le64(ctx->dec_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
        }
        req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
        req_data->u.sym_req.u.cipher.para.src_data_len =
                        cpu_to_le32(req->nbytes);

        dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
        if (unlikely(dst_len > U32_MAX)) {
                pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
                err = -EINVAL;
                goto free;
        }

        dst_len = min_t(unsigned int, req->nbytes, dst_len);
        pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
                        req->nbytes, dst_len);

        if (unlikely(req->nbytes + dst_len + ivsize +
                sizeof(vc_req->status) > vcrypto->max_size)) {
                pr_err("virtio_crypto: The length is too big\n");
                err = -EINVAL;
                goto free;
        }

        req_data->u.sym_req.u.cipher.para.dst_data_len =
                        cpu_to_le32((uint32_t)dst_len);

        /* Outhdr */
        sg_init_one(&outhdr, req_data, sizeof(*req_data));
        sgs[num_out++] = &outhdr;

        /* IV */

        /*
         * Avoid DMA from the stack: the IV must live in
         * dynamically-allocated memory while the device reads it.
         */
        iv = kzalloc_node(ivsize, GFP_ATOMIC,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!iv) {
                err = -ENOMEM;
                goto free;
        }
        memcpy(iv, req->info, ivsize);
        /*
         * For decryption, save the last ciphertext block into req->info
         * now: CBC chaining needs it as the output IV, and an in-place
         * operation may overwrite req->src before completion.
         */
        if (!vc_sym_req->encrypt)
                scatterwalk_map_and_copy(req->info, req->src,
                                         req->nbytes - AES_BLOCK_SIZE,
                                         AES_BLOCK_SIZE, 0);

        sg_init_one(&iv_sg, iv, ivsize);
        sgs[num_out++] = &iv_sg;
        vc_sym_req->iv = iv;

        /* Source data */
        for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
                sgs[num_out++] = sg;

        /* Destination data */
        for (sg = req->dst; sg; sg = sg_next(sg))
                sgs[num_out + num_in++] = sg;

        /* Status */
        sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
        sgs[num_out + num_in++] = &status_sg;

        vc_req->sgs = sgs;

        spin_lock_irqsave(&data_vq->lock, flags);
        err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
                                num_in, vc_req, GFP_ATOMIC);
        virtqueue_kick(data_vq->vq);
        spin_unlock_irqrestore(&data_vq->lock, flags);
        if (unlikely(err < 0))
                goto free_iv;

        return 0;

free_iv:
        kzfree(iv);
free:
        kzfree(req_data);
        kfree(sgs);
        return err;
}

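/* Hand an encryption request over to the crypto engine. */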
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
        struct virtio_crypto_sym_request *vc_sym_req =
                                ablkcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        if (!req->nbytes)
                return 0;
        if (req->nbytes % AES_BLOCK_SIZE)
                return -EINVAL;

        vc_req->dataq = data_vq;
        vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
        vc_sym_req->ablkcipher_ctx = ctx;
        vc_sym_req->ablkcipher_req = req;
        vc_sym_req->encrypt = true;

        return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}

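/* Hand a decryption request over to the crypto engine. */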
static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
        struct virtio_crypto_sym_request *vc_sym_req =
                                ablkcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        if (!req->nbytes)
                return 0;
        if (req->nbytes % AES_BLOCK_SIZE)
                return -EINVAL;

        vc_req->dataq = data_vq;
        vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
        vc_sym_req->ablkcipher_ctx = ctx;
        vc_sym_req->ablkcipher_req = req;
        vc_sym_req->encrypt = false;

        return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}

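/* tfm init: set the request context size and the crypto engine hooks. */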
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
        ctx->tfm = tfm;

        ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
        ctx->enginectx.op.prepare_request = NULL;
        ctx->enginectx.op.unprepare_request = NULL;
        return 0;
}

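/* tfm exit: close both device sessions and drop the device reference. */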
static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        if (!ctx->vcrypto)
                return;

        virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
        virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
        virtcrypto_dev_put(ctx->vcrypto);
        ctx->vcrypto = NULL;
}

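/* crypto engine do_one_request hook: submit the request to the device. */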
int virtio_crypto_ablkcipher_crypt_req(
        struct crypto_engine *engine, void *vreq)
{
        struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
        struct virtio_crypto_sym_request *vc_sym_req =
                                ablkcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct data_queue *data_vq = vc_req->dataq;
        int ret;

        ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
        if (ret < 0)
                return ret;

        virtqueue_kick(data_vq->vq);

        return 0;
}

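/*
 * Called from the dataq callback: for encryption, copy the last
 * ciphertext block to req->info as the output IV, then free per-request
 * resources and complete the request on the engine.
 */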
static void virtio_crypto_ablkcipher_finalize_req(
        struct virtio_crypto_sym_request *vc_sym_req,
        struct ablkcipher_request *req,
        int err)
{
        if (vc_sym_req->encrypt)
                scatterwalk_map_and_copy(req->info, req->dst,
                                         req->nbytes - AES_BLOCK_SIZE,
                                         AES_BLOCK_SIZE, 0);
        kzfree(vc_sym_req->iv);
        virtcrypto_clear_request(&vc_sym_req->base);

        crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
                                           req, err);
}

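/* Algorithms this driver can expose, gated by device capabilities. */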
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
        .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
        .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
        .algo = {
                .cra_name = "cbc(aes)",
                .cra_driver_name = "virtio_crypto_aes_cbc",
                .cra_priority = 150,
                .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_type = &crypto_ablkcipher_type,
                .cra_init = virtio_crypto_ablkcipher_init,
                .cra_exit = virtio_crypto_ablkcipher_exit,
                .cra_u = {
                        .ablkcipher = {
                                .setkey = virtio_crypto_ablkcipher_setkey,
                                .decrypt = virtio_crypto_ablkcipher_decrypt,
                                .encrypt = virtio_crypto_ablkcipher_encrypt,
                                .min_keysize = AES_MIN_KEY_SIZE,
                                .max_keysize = AES_MAX_KEY_SIZE,
                                .ivsize = AES_BLOCK_SIZE,
                        },
                },
        },
} };

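/*
 * Register supported algorithms for a newly probed device. An algorithm
 * is registered with the crypto API only when its first capable device
 * shows up; active_devs tracks how many devices currently back it.
 */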
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
        int ret = 0;
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
                uint32_t service = virtio_crypto_algs[i].service;
                uint32_t algonum = virtio_crypto_algs[i].algonum;

                if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_algs[i].active_devs == 0) {
                        ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
                        if (ret)
                                goto unlock;
                }

                virtio_crypto_algs[i].active_devs++;
                dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
                         virtio_crypto_algs[i].algo.cra_name);
        }

unlock:
        mutex_unlock(&algs_lock);
        return ret;
}

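/*
 * Mirror of registration at device removal: decrement active_devs and
 * unregister the algorithm when its last backing device goes away.
 */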
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
                uint32_t service = virtio_crypto_algs[i].service;
                uint32_t algonum = virtio_crypto_algs[i].algonum;

                if (virtio_crypto_algs[i].active_devs == 0 ||
                    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_algs[i].active_devs == 1)
                        crypto_unregister_alg(&virtio_crypto_algs[i].algo);

                virtio_crypto_algs[i].active_devs--;
        }

        mutex_unlock(&algs_lock);
}