/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

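/*
 * Per-CPU state: each CPU has its own request queue and work item, so
 * requests are queued on the submitting CPU and processed from
 * workqueue context on that same CPU.
 */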
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

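/*
 * Allocate the per-CPU queues and bind each one to the worker that
 * drains it; returns -ENOMEM if the percpu allocation fails.
 */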
static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

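/*
 * Queue a request on the current CPU's queue and kick that CPU's
 * worker. Returns the crypto_enqueue_request() status: -EINPROGRESS
 * on success, or -EBUSY when the queue is already full.
 */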
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/* Called in workqueue context: do one real unit of crypto work (via
 * req->complete) and reschedule itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue. preempt_disable/enable is used to prevent being
	 * preempted by cryptd_enqueue_request(), and local_bh_disable/enable
	 * to keep cryptd_enqueue_request() from running in softirq context
	 * while the queue is manipulated.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

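/*
 * If the algorithm being wrapped is marked CRYPTO_ALG_INTERNAL, the
 * cryptd instance must carry the flag too so that it is not exposed
 * to general users of the crypto API.
 */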
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;
	if (algt->type & CRYPTO_ALG_INTERNAL)
		*type |= CRYPTO_ALG_INTERNAL;
	if (algt->mask & CRYPTO_ALG_INTERNAL)
		*mask |= CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

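/*
 * Run the actual blkcipher operation in workqueue context. An err of
 * -EINPROGRESS means this call is only the backlog notification, so
 * the crypto work is skipped and the status is simply propagated to
 * the original completion function.
 */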
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

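/*
 * Allocate a crypto instance with 'head' bytes in front of it and
 * 'tail' bytes of context behind it, name it "cryptd(<driver-name>)",
 * and give it a 50-point priority bonus over the wrapped algorithm.
 */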
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

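/*
 * The workers below all follow the same pattern as the blkcipher path:
 * an err of -EINPROGRESS is only the backlog notification, so the real
 * shash operation is skipped; otherwise it is run synchronously, and
 * the request is completed with softirqs disabled either way.
 */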
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.halg.base.cra_flags = type;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

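/*
 * Run an AEAD operation in workqueue context, pointing the request at
 * the child transform first. As elsewhere, an err of -EINPROGRESS only
 * signals a backlog notification and the actual crypto work is skipped.
 */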
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;

	rctx = aead_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);
	req->base.complete = rctx->complete;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
	ctx->child = cipher;
	tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_AEAD;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = alg->cra_type;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
	inst->alg.cra_init = cryptd_aead_init_tfm;
	inst->alg.cra_exit = cryptd_aead_exit_tfm;
	inst->alg.cra_aead.setkey      = alg->cra_aead.setkey;
	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
	inst->alg.cra_aead.geniv       = alg->cra_aead.geniv;
	inst->alg.cra_aead.ivsize      = alg->cra_aead.ivsize;
	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
	inst->alg.cra_aead.encrypt     = cryptd_aead_encrypt_enqueue;
	inst->alg.cra_aead.decrypt     = cryptd_aead_decrypt_enqueue;
	inst->alg.cra_aead.givencrypt  = alg->cra_aead.givencrypt;
	inst->alg.cra_aead.givdecrypt  = alg->cra_aead.givdecrypt;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->aead_spawn.base);
out_free_inst:
		kfree(inst);
	}
out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct cryptd_queue queue;

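/*
 * Template entry point: dispatch to the constructor that matches the
 * type of the algorithm being wrapped.
 */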
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_spawn(&aead_ctx->aead_spawn.base);
		kfree(inst);
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

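/*
 * A minimal lifecycle sketch for the ablkcipher interface above
 * (illustrative only; "__driver-cbc-aes-aesni" is just an example of
 * an internal driver name, and error handling is trimmed; internal
 * algorithms additionally pass CRYPTO_ALG_INTERNAL in type and mask):
 *
 *	struct cryptd_ablkcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni",
 *				       CRYPTO_ALG_INTERNAL,
 *				       CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	(submit ablkcipher requests against &ctfm->base; cryptd runs
 *	 them from workqueue, i.e. process, context)
 *	cryptd_free_ablkcipher(ctfm);
 */
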
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

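/*
 * A minimal lifecycle sketch for the ahash interface above
 * (illustrative only; "sha1" is just an example name and error
 * handling is trimmed):
 *
 *	struct cryptd_ahash *ctfm;
 *
 *	ctfm = cryptd_alloc_ahash("sha1", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	(use &ctfm->base as an ordinary crypto_ahash; when already in
 *	 process context, cryptd_ahash_child() and cryptd_shash_desc()
 *	 give direct access to the synchronous shash)
 *	cryptd_free_ahash(ctfm);
 */
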
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}
	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

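/*
 * A minimal lifecycle sketch for the AEAD interface above (illustrative
 * only; "__driver-gcm-aes-aesni" is just an example of an internal
 * driver name, and error handling is trimmed):
 *
 *	struct cryptd_aead *ctfm;
 *
 *	ctfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
 *				 CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	(submit aead requests against &ctfm->base; use cryptd_aead_child()
 *	 to reach the wrapped transform when already in process context)
 *	cryptd_free_aead(ctfm);
 */
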
static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");