/**
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

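/* load the AES key into both the CCM (cipher) csbcpb and the CCA (AAD MAC)
 * csbcpb. Only AES-128 is accepted; the per-key-size limits come from
 * nx_ctx->props. */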
static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}

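/* an rfc4309 key blob is the AES key with a 3 byte nonce salt appended;
 * strip off the salt, stash it for IV construction, and hand the bare AES
 * key to ccm_aes_nx_set_key() */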
static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

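/* CCM permits any even tag length from 4 to 16 bytes (RFC 3610,
 * NIST SP 800-38C) */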
static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

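/* RFC 4309 only permits ICV lengths of 8, 12 or 16 octets */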
static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

/* taken from crypto/ccm.c */
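/* encode msglen as a big-endian integer in the last csize bytes of block,
 * zeroing the rest of the field; e.g. csize = 3 and msglen = 0x012345
 * leaves the field holding 01 23 45. Fails with -EOVERFLOW if msglen does
 * not fit in csize bytes. */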
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}

/* based on code from crypto/ccm.c */
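/* B0 is the first authentication block: the flags byte (L' comes in via
 * the IV; the tag-length and adata bits are set here), the nonce, and the
 * message length encoded in the trailing l bytes (RFC 3610, section 2.2) */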
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;
	int rc;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);

	return rc;
}

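/* generate the initial "partial authentication tag" (PAT): build B0 and,
 * when there is associated data, B1, then MAC them together with the AAD.
 * Short AAD (<= 14 bytes) is folded into B1 and run through the CCM
 * csbcpb in a single operation; longer AAD is chunked through the CCA
 * csbcpb. The resulting 16 byte PAT is copied to @out. */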
static int generate_pat(u8                   *iv,
			struct aead_request  *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int          authsize,
			unsigned int          nbytes,
			u8                   *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf says:
	 * Note: RFC3610 allows the AAD data to be up to 2^64 - 1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 - 1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 - 1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32 - 1 bytes.
	 */

	if (!req->assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (req->assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do 1 CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = req->assoclen;
	} else if (req->assoclen <= 65280) {
		/* if associated data is 65280 bytes (2^16 - 2^8) or less,
		 * we construct B1 differently and feed in the associated
		 * data to a CCA operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (req->assoclen <= 65280) {
			*(u16 *)b1 = (u16)req->assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
					 iauth_len, SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = (u32)req->assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
					 iauth_len, SCATTERWALK_FROM_SG);
		}
	}

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!req->assoclen) {
		return rc;
	} else if (req->assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				nx_driver.of.max_sg_len/sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				nx_ctx->ap->databytelen/NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, req->assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->assoc, processed,
						    &to_process);

			if ((to_process + processed) < req->assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
				nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
				AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(req->assoclen,
					&(nx_ctx->stats->aes_bytes));

			processed += to_process;
		} while (processed < req->assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}

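/* CCM decrypt: copy the transmitted auth tag out of the tail of src, run
 * the counter-mode decryption in databytelen-bounded chunks, then compare
 * the computed MAC against the saved tag */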
static int ccm_nx_decrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in
		 * this update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       &to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
			csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
			csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	/* compare tags in constant time so a mismatch position isn't leaked */
	rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
			   authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

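/* CCM encrypt: MAC the AAD via generate_pat(), encrypt the plaintext in
 * databytelen-bounded chunks, then append the auth tag to dst */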
static int ccm_nx_encrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in
		 * this update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       &to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
			csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
			csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

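/* rfc4309 encrypt: rebuild the full 11 byte CCM nonce from the 3 byte
 * salt stored at setkey time plus the 8 byte per-request IV; iv[0] = 3
 * is the L' flags value for a 4 byte message length field */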
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_ccm_rctx *rctx = aead_request_ctx(req);
	struct blkcipher_desc desc;
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_encrypt(req, &desc);
}

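/* plain ccm(aes): the caller supplies the full 16 byte IV, so validate
 * its flags byte before encrypting */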
static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, &desc);
}

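/* rfc4309 decrypt: reconstruct the nonce exactly as in the encrypt path */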
static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_ccm_rctx *rctx = aead_request_ctx(req);
	struct blkcipher_desc desc;
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	return ccm_nx_decrypt(req, &desc);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;
	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, &desc);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct crypto_alg nx_ccm_aes_alg = {
	.cra_name        = "ccm(aes)",
	.cra_driver_name = "ccm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
			   CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_aead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_ccm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = ccm_aes_nx_set_key,
		.setauthsize = ccm_aes_nx_setauthsize,
		.encrypt     = ccm_aes_nx_encrypt,
		.decrypt     = ccm_aes_nx_decrypt,
	}
};

struct crypto_alg nx_ccm4309_aes_alg = {
	.cra_name        = "rfc4309(ccm(aes))",
	.cra_driver_name = "rfc4309-ccm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
			   CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_nivaead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_ccm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = 8,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = ccm4309_aes_nx_set_key,
		.setauthsize = ccm4309_aes_nx_setauthsize,
		.encrypt     = ccm4309_aes_nx_encrypt,
		.decrypt     = ccm4309_aes_nx_decrypt,
		.geniv       = "seqiv",
	}
};