/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

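/*
 * Requests are kept on a per-CPU crypto_queue; each CPU also gets its
 * own work_struct, so one CPU's backlog is drained independently of
 * the others.
 */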
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

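/*
 * Queue a request on the current CPU's queue and kick that CPU's work
 * item.  get_cpu() pins us, so the queue we pick and the CPU we pass
 * to queue_work_on() cannot disagree.
 */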
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/* Called in workqueue context: do one unit of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable prevents us from being
	 * preempted by cryptd_enqueue_request(); local_bh_disable/enable
	 * prevents cryptd_enqueue_request() from running in softirq
	 * context on this CPU while the queue is being manipulated.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

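/*
 * Copy the CRYPTO_ALG_INTERNAL bits from the instantiation parameters
 * so that cryptd can wrap algorithms flagged as internal-only (hidden
 * from general crypto_alloc_* lookups) without exposing them.
 */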
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

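/*
 * Run the synchronous child blkcipher in process (workqueue) context,
 * then call the original completion.  An err of -EINPROGRESS means
 * this is only a backlog notification, which is forwarded to the
 * caller without doing any crypto.  BHs are disabled around the
 * completion callback since callers expect softirq-like context.
 */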
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

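/*
 * The async entry points all follow the same pattern: stash the
 * caller's completion in the request context, substitute the cryptd
 * callback that does the real work, and push the request onto this
 * CPU's queue.
 */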
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

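/*
 * Name the instance "cryptd(<driver>)" and give it a priority 50 above
 * the wrapped algorithm, so the async wrapper wins lookups by cra_name.
 */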
static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

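/*
 * The shash_desc for the child hash lives in the request context, so
 * partial state set up by ->init is still there when ->update, ->final
 * or ->export run later on the same request.
 */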
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.halg.base.cra_flags = type;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

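/*
 * For AEAD the child is handed the very same request: redirect it with
 * aead_request_set_tfm() and call the child's encrypt/decrypt directly.
 * The completion is read out before crypting because the child reuses
 * the request context area (the reqsize is the max of both), which may
 * clobber rctx->complete.
 */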
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	crypto_completion_t compl;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);
out:
	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

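/*
 * All three instance context layouts begin with their spawn, so the
 * three crypto_instance_ctx() views below alias the same memory; the
 * type bits in cra_flags select which interpretation is valid.
 */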
static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

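/*
 * Helpers for drivers that want a cryptd-wrapped handle directly.  A
 * minimal usage sketch (the algorithm name below is illustrative, not
 * one this file guarantees to exist):
 *
 *	struct cryptd_ablkcipher *tfm;
 *
 *	tfm = cryptd_alloc_ablkcipher("__aes-cbc-example",
 *				      CRYPTO_ALG_INTERNAL,
 *				      CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	cryptd_free_ablkcipher(tfm);
 */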
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

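/*
 * Same pattern for hashes; e.g. a driver wrapping an internal shash
 * (again, the name is illustrative only):
 *
 *	struct cryptd_ahash *hash;
 *
 *	hash = cryptd_alloc_ahash("__example-ghash",
 *				  CRYPTO_ALG_INTERNAL,
 *				  CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(hash))
 *		return PTR_ERR(hash);
 *	...
 *	cryptd_free_ahash(hash);
 */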
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

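/*
 * And for AEADs (once more with an illustrative name):
 *
 *	struct cryptd_aead *aead;
 *
 *	aead = cryptd_alloc_aead("__example-gcm-aes",
 *				 CRYPTO_ALG_INTERNAL,
 *				 CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(aead))
 *		return PTR_ERR(aead);
 *	...
 *	cryptd_free_aead(aead);
 */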
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}
	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");