/**
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


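/*
 * Program both coprocessor blocks: the GCM CPB drives the cipher
 * itself, while the separate GCA CPB is used by nx_gca() below to
 * hash the associated data.
 */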
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}

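/*
 * An rfc4106 key blob is the AES key with a 4-byte nonce (salt)
 * appended:
 *
 *	[ 16/24/32-byte AES key | 4-byte nonce ]
 *
 * Split off the nonce before programming the AES key.
 */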
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	if (key_len < 4)
		return -EINVAL;

	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		goto out;

	memcpy(nonce, in_key + key_len, 4);
out:
	return rc;
}

static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	if (authsize > crypto_aead_alg(tfm)->maxauthsize)
		return -EINVAL;

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

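/* rfc4106 only permits ICV lengths of 8, 12 or 16 bytes */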
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	crypto_aead_crt(tfm)->authsize = authsize;

	return 0;
}

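/*
 * Hash the AAD with the GCA coprocessor, walking req->assoc in chunks
 * bounded by the NX sg and data-length limits and chaining the partial
 * result through in_pat/out_pat. Note that op_aead.inlen is computed
 * as a negative value: per the vio_pfo_op convention, a negative
 * length marks the address as a scatter/gather list rather than a
 * flat buffer.
 */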
static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
		  struct aead_request   *req,
		  u8                    *out)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->assoc);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}

	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->assoc, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
				csbcpb_aead->cpb.aes_gca.out_pat,
				AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}

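/*
 * GMAC path, used when there is associated data but no payload: the
 * request reduces to authenticating the AAD, with bit_length_data
 * forced to zero below.
 */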
static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = req->assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);

	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->assoc, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}

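/*
 * With no payload and no AAD, the GHASH term of the tag is zero, so
 * the tag collapses to E(K, J0), i.e. the encrypted initial counter
 * block; running the IV through AES-ECB computes exactly that (see
 * the nx_wb note in the function body).
 */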
static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
		     int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
			sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
				 &len, nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
			crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key uses the same CPB region as the GCM AAD and counter
	 * fields, so it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}

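/*
 * Main GCM routine: hash any AAD via nx_gca(), walk the payload in
 * hardware-sized chunks while chaining the counter, pattern and S0
 * values between h_cop calls, then emit (encrypt) or verify (decrypt)
 * the auth tag.
 */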
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	desc.info = rctx->iv;
	/* initialize the counter: with a 96-bit IV, GCM defines the initial
	 * counter block J0 as IV || 0^31 || 1 */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	if (nbytes == 0) {
		if (req->assoclen == 0)
			rc = gcm_empty(req, &desc, enc);
		else
			rc = gmac(req, &desc);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}

	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
				       req->src, &to_process, processed,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);

		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
			csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
			csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
				 req->dst, nbytes,
				 crypto_aead_authsize(crypto_aead_reqtfm(req)),
				 SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(itag, req->src, nbytes,
				 crypto_aead_authsize(crypto_aead_reqtfm(req)),
				 SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

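/*
 * The 12-byte (96-bit) GCM IV is copied into the per-request context
 * so that concurrent requests on the same tfm each operate on their
 * own counter block.
 */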
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, 12);

	return gcm_aes_nx_crypt(req, 0);
}

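/*
 * For rfc4106 the per-request IV is only 8 bytes; the full 12-byte
 * GCM IV is the 4-byte nonce taken from the key material followed by
 * those 8 bytes.
 */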
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 1);
}

static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	return gcm_aes_nx_crypt(req, 0);
}

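/*
 * Both algs below are exported for registration with the crypto API
 * by the driver core (see nx.c); a cra_priority of 300 is intended to
 * win over the software gcm implementations when the hardware is
 * available.
 */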
/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses tfm->blocksize. */
struct crypto_alg nx_gcm_aes_alg = {
	.cra_name        = "gcm(aes)",
	.cra_driver_name = "gcm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_aead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_gcm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setkey      = gcm_aes_nx_set_key,
		.setauthsize = gcm_aes_nx_setauthsize,
		.encrypt     = gcm_aes_nx_encrypt,
		.decrypt     = gcm_aes_nx_decrypt,
	}
};

struct crypto_alg nx_gcm4106_aes_alg = {
	.cra_name        = "rfc4106(gcm(aes))",
	.cra_driver_name = "rfc4106-gcm-aes-nx",
	.cra_priority    = 300,
	.cra_flags       = CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize   = 1,
	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
	.cra_type        = &crypto_nivaead_type,
	.cra_module      = THIS_MODULE,
	.cra_init        = nx_crypto_ctx_aes_gcm_init,
	.cra_exit        = nx_crypto_ctx_exit,
	.cra_aead = {
		.ivsize      = 8,
		.maxauthsize = AES_BLOCK_SIZE,
		.geniv       = "seqiv",
		.setkey      = gcm4106_aes_nx_set_key,
		.setauthsize = gcm4106_aes_nx_setauthsize,
		.encrypt     = gcm4106_aes_nx_encrypt,
		.decrypt     = gcm4106_aes_nx_decrypt,
	}
};