/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may-sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

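/*
 * Usage sketch (illustrative, not part of the original file): a
 * driver's ->update() hook typically drains the request through the
 * walk API above.  The helper my_hash_block() is hypothetical; the
 * walk calls themselves are the real interface.
 *
 *	static int my_update(struct ahash_request *req)
 *	{
 *		struct crypto_hash_walk walk;
 *		int nbytes;
 *
 *		for (nbytes = crypto_hash_walk_first(req, &walk);
 *		     nbytes > 0;
 *		     nbytes = crypto_hash_walk_done(&walk, 0))
 *			my_hash_block(walk.data, nbytes);
 *
 *		return nbytes;
 *	}
 */
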
int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

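/*
 * Usage sketch (illustrative, not from this file): keyed hashes such
 * as "hmac(sha256)" take their key through this entry point; callers
 * need not worry about key alignment, since misaligned keys are
 * bounced through a temporary aligned buffer above.  The variables
 * key and keylen are hypothetical.
 *
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *	if (err)
 *		goto out_free_tfm;
 */
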
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

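/*
 * Worked example (illustrative, assuming a ctx alignment of 8):
 * kmalloc() already returns memory aligned to
 * crypto_tfm_ctx_alignment(), so only the remainder of the alignmask
 * needs slack.  With len = 20 and mask = 63, the buffer is sized
 * 20 + (63 & ~7) = 76 bytes, which always leaves room to place a
 * 64-byte-aligned 20-byte result inside it.
 */
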
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *	.result		= ADJUSTED[new aligned buffer]
	 *	.base.complete	= ADJUSTED[pointer to completion function]
	 *	.base.data	= ADJUSTED[*req (pointer to self)]
	 *	.priv		= ADJUSTED[new priv] {
	 *		.result		= ORIGINAL(result)
	 *		.complete	= ORIGINAL(base.complete)
	 *		.data		= ORIGINAL(base.data)
	 *	}
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 * is for internal use of the Crypto API and the
	 * user must _NOT_ _EVER_ depend on its contents!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;

	/* Restore the original crypto request. */
	req->result = priv->result;
	req->base.complete = priv->complete;
	req->base.data = priv->data;
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_op_unaligned_finish(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

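/*
 * End-to-end usage sketch (illustrative, not from this file): hashing
 * a buffer from process context and waiting for an async backend.
 * Error handling is abbreviated; buf, len, my_done_cb and
 * my_completion are hypothetical.
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, &my_completion);
 *	sg_init_one(&sg, buf, len);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *
 *	err = crypto_ahash_digest(req);
 *	if (err == -EINPROGRESS || err == -EBUSY)
 *		wait_for_completion(&my_completion);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */
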
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	ahash_def_finup_finish2(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	err = ahash_def_finup_finish1(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	return ahash_def_finup_finish1(req, err);
}

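/*
 * Explanatory note (summary, not from the original sources): the
 * default finup above is update-then-final stitched together.
 * ahash_def_finup() saves the request and starts ->update(); if that
 * completes synchronously, finish1() chains straight into ->final().
 * Should either step go asynchronous, done1/done2 resume the chain
 * from the completion callback; in both paths finish2() copies the
 * digest out and restores the original request.
 */
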
static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

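/*
 * Lookup sketch (illustrative): alg_name may name a bare algorithm or
 * a template instantiation, and type/mask can constrain the selection,
 * e.g. to async-only implementations.
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */
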
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

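/*
 * Registration sketch for a hypothetical driver (illustrative, not
 * from this file).  Note that ahash_prepare_alg() above rejects a zero
 * statesize, so it must be filled in; all my_* names are assumptions.
 *
 *	static struct ahash_alg my_sha256_alg = {
 *		.init	= my_init,
 *		.update	= my_update,
 *		.final	= my_final,
 *		.finup	= my_finup,
 *		.digest	= my_digest,
 *		.halg	= {
 *			.digestsize = SHA256_DIGEST_SIZE,
 *			.statesize  = sizeof(struct my_hash_state),
 *			.base	= {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-mydev",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = SHA256_BLOCK_SIZE,
 *				.cra_ctxsize	 = sizeof(struct my_hash_ctx),
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&my_sha256_alg);
 */
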
int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");