/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c, indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"
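
/*
 * The ablkcipher_walk helpers below let a driver step through a request's
 * source and destination scatterlists in cipher-block-sized chunks, bouncing
 * pieces that are misaligned or straddle a page through temporary buffers.
 *
 * A minimal usage sketch (hypothetical driver code; the actual processing
 * step and its error handling are elided):
 *
 *	struct ablkcipher_walk walk;
 *	int err;
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *	while (!err && walk.nbytes) {
 *		(process walk.nbytes bytes from walk.src.page/.offset
 *		 into walk.dst.page/.offset, then report how many bytes
 *		 of the chunk were left unprocessed, normally none:)
 *		err = ablkcipher_walk_done(req, &walk, 0);
 *	}
 */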
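/*
 * One queued copy-back.  The slow path (ablkcipher_next_slow) processes data
 * in a bounce buffer; the result recorded here is written out to the real
 * destination scatterlist by __ablkcipher_walk_complete() once the request
 * has finished.
 */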
struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
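
/*
 * Worked example, assuming 4 KiB pages: for start 8 bytes below a page
 * boundary and len == 16, start + len - 1 lands on the next page, so
 * end_page is that page boundary and max() returns it: the spot begins
 * exactly on the new page.  When no boundary is crossed, end_page <= start
 * and start itself is returned unchanged.
 */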

static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
						unsigned int bsize)
{
	unsigned int n = bsize;

	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}

	return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
						unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

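/*
 * Advance the walk once the caller has finished with the current chunk.
 * A non-negative err is the number of bytes of walk->nbytes that were left
 * unprocessed; it must be 0 on the slow path, which always hands out a
 * whole block.  A negative err aborts the walk and is passed through.
 */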
int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
			n = ablkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = ablkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}

	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);

	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

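/*
 * Slow path: the next chunk is misaligned or straddles a page, so bounce it
 * through a kmalloc'd buffer.  The chunk is copied in from the source here;
 * *src_p and *dst_p both point into the buffer (processing happens in
 * place), and the result is queued on walk->buffers for
 * __ablkcipher_walk_complete() to copy out to the real destination.
 */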
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

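/*
 * req->info may hand us an IV that does not satisfy the algorithm's
 * alignmask.  If so, copy it into a freshly allocated aligned buffer;
 * walk->iv_buffer is freed (and the IV copied back to req->info) in
 * ablkcipher_walk_done() when the walk ends.
 */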
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	walk->iv = req->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

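/*
 * The caller's key buffer may not satisfy the algorithm's alignmask, so
 * bounce the key through an aligned temporary buffer.  The copy is zeroed
 * before being freed so that no key material is left behind on the heap.
 */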
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return crypto_ablkcipher_decrypt(&req->creq);
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

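/*
 * Report the algorithm's parameters to user space via the crypto_user
 * netlink interface; compiled out (-ENOSYS) when CONFIG_NET is not set.
 */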
#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
	return -ENOSYS;
}

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				     u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->givencrypt = alg->givencrypt;
	crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);

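/*
 * Choose the default IV generator template for an algorithm.  eseqiv
 * encrypts a sequence number to derive the IV and assumes the IV is
 * exactly one cipher block wide, so it is only picked when ivsize matches
 * the block size; anything else (e.g. counter-style modes) gets chainiv.
 */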
const char *crypto_default_geniv(const struct crypto_alg *alg)
{
	if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					 alg->cra_ablkcipher.ivsize) !=
	    alg->cra_blocksize)
		return "chainiv";

	return "eseqiv";
}

static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
	struct rtattr *tb[3];
	struct {
		struct rtattr attr;
		struct crypto_attr_type data;
	} ptype;
	struct {
		struct rtattr attr;
		struct crypto_attr_alg data;
	} palg;
	struct crypto_template *tmpl;
	struct crypto_instance *inst;
	struct crypto_alg *larval;
	const char *geniv;
	int err;

	larval = crypto_larval_lookup(alg->cra_driver_name,
				      (type & ~CRYPTO_ALG_TYPE_MASK) |
				      CRYPTO_ALG_TYPE_GIVCIPHER,
				      mask | CRYPTO_ALG_TYPE_MASK);
	err = PTR_ERR(larval);
	if (IS_ERR(larval))
		goto out;

	err = -EAGAIN;
	if (!crypto_is_larval(larval))
		goto drop_larval;

	ptype.attr.rta_len = sizeof(ptype);
	ptype.attr.rta_type = CRYPTOA_TYPE;
	ptype.data.type = type | CRYPTO_ALG_GENIV;
	/* GENIV tells the template that we're making a default geniv. */
	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
	tb[0] = &ptype.attr;

	palg.attr.rta_len = sizeof(palg);
	palg.attr.rta_type = CRYPTOA_ALG;
	/* Must use the exact name to locate ourselves. */
	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
	tb[1] = &palg.attr;

	tb[2] = NULL;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		geniv = alg->cra_blkcipher.geniv;
	else
		geniv = alg->cra_ablkcipher.geniv;

	if (!geniv)
		geniv = crypto_default_geniv(alg);

	tmpl = crypto_lookup_template(geniv);
	err = -ENOENT;
	if (!tmpl)
		goto kill_larval;

	inst = tmpl->alloc(tb);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto put_tmpl;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		tmpl->free(inst);
		goto put_tmpl;
	}

	/* Redo the lookup to use the instance we just registered. */
	err = -EAGAIN;

put_tmpl:
	crypto_tmpl_put(tmpl);
kill_larval:
	crypto_larval_kill(larval);
drop_larval:
	crypto_mod_put(larval);
out:
	crypto_mod_put(alg);
	return err;
}

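/*
 * Look up a symmetric cipher for use as an skcipher.  Algorithms that are
 * already givciphers, or that take no IV at all, are returned directly.
 * Otherwise a default IV-generator instance is registered around the
 * algorithm via crypto_givcipher_default(), which returns -EAGAIN so that
 * the caller redoes the lookup and finds the new instance.
 */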
struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER)
		return alg;

	if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					  alg->cra_ablkcipher.ivsize))
		return alg;

	crypto_mod_put(alg);
	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
				    mask & ~CRYPTO_ALG_TESTED);
	if (IS_ERR(alg))
		return alg;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_GIVCIPHER) {
		if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
			crypto_mod_put(alg);
			alg = ERR_PTR(-ENOENT);
		}
		return alg;
	}

	BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
		 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
					     alg->cra_ablkcipher.ivsize));

	return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}
EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
			 u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	alg = crypto_lookup_skcipher(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

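/**
 * crypto_alloc_ablkcipher - allocate an async block cipher handle
 * @alg_name: algorithm name, e.g. "cbc(aes)"
 * @type: algorithm type flags
 * @mask: bits of the type that must match
 *
 * Allocates and returns an async block cipher transform, retrying while the
 * lookup reports -EAGAIN (for instance right after a default IV generator
 * has been registered for the algorithm).  Returns an ERR_PTR() on failure;
 * -EINTR if a fatal signal arrived while retrying.
 */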
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	struct crypto_tfm *tfm;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask);

	for (;;) {
		struct crypto_alg *alg;

		alg = crypto_lookup_skcipher(alg_name, type, mask);
		if (IS_ERR(alg)) {
			err = PTR_ERR(alg);
			goto err;
		}

		tfm = __crypto_alloc_tfm(alg, type, mask);
		if (!IS_ERR(tfm))
			return __crypto_ablkcipher_cast(tfm);

		crypto_mod_put(alg);
		err = PTR_ERR(tfm);

err:
		if (err != -EAGAIN)
			break;
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			break;
		}
	}

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
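
/*
 * Usage sketch (hypothetical caller; error handling abbreviated):
 *
 *	struct crypto_ablkcipher *tfm;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	(build an ablkcipher_request, then call
 *	 crypto_ablkcipher_encrypt() or crypto_ablkcipher_decrypt())
 *	crypto_free_ablkcipher(tfm);
 */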