/*
 * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

struct ccp_aes_xts_def {
	const char *name;
	const char *drv_name;
};

static struct ccp_aes_xts_def aes_xts_algs[] = {
	{
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-ccp",
	},
};

struct ccp_unit_size_map {
	unsigned int size;
	u32 value;
};

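/* Unit sizes a request length may be an exact multiple of, largest
 * first. Sizes that map to CCP_XTS_AES_UNIT_SIZE__LAST have no native
 * CCP encoding; requests that resolve to them are routed to the
 * fallback cipher.
 */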
static struct ccp_unit_size_map unit_size_map[] = {
	{
		.size	= 4096,
		.value	= CCP_XTS_AES_UNIT_SIZE_4096,
	},
	{
		.size	= 2048,
		.value	= CCP_XTS_AES_UNIT_SIZE_2048,
	},
	{
		.size	= 1024,
		.value	= CCP_XTS_AES_UNIT_SIZE_1024,
	},
	{
		.size	= 512,
		.value	= CCP_XTS_AES_UNIT_SIZE_512,
	},
	{
		.size	= 256,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 128,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 64,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 32,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 16,
		.value	= CCP_XTS_AES_UNIT_SIZE_16,
	},
	{
		.size	= 1,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
};

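/* Request completion callback: on success, copy the resulting IV back
 * into the request so the caller sees the updated value.
 */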
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	if (ret)
		return ret;

	memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}

static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));

	/* Only a 128-bit AES key paired with a 128-bit tweak key is
	 * supported natively; any other size is handled by the fallback
	 */
	switch (key_len) {
	case AES_KEYSIZE_128 * 2:
		memcpy(ctx->u.aes.key, key, key_len);
		break;
	}
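	/* An XTS key is two concatenated AES keys of equal length, so
	 * the effective AES key length is half of what was supplied.
	 */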
	ctx->u.aes.key_len = key_len / 2;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

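	/* Always key the fallback cipher so it can take over any request
	 * the CCP cannot handle natively (see ccp_aes_xts_crypt()).
	 */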
	return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key,
					key_len);
}

static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
			     unsigned int encrypt)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	unsigned int unit;
	u32 unit_size;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (req->nbytes & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (!req->info)
		return -EINVAL;

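	/* Only requests no larger than the biggest unit size are scanned;
	 * pick the largest unit size that evenly divides the request
	 * length. Otherwise unit_size stays at __LAST and the request is
	 * handed to the fallback below.
	 */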
	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
	if (req->nbytes <= unit_size_map[0].size) {
		for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
			if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
				unit_size = unit_size_map[unit].value;
				break;
			}
		}
	}

	if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
	    (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
		/* Use the fallback to process the request for any
		 * unsupported unit sizes or key sizes
		 */
		ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher);
		ret = (encrypt) ? crypto_ablkcipher_encrypt(req) :
				  crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));

		return ret;
	}

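	/* Stash the tweak so it can be handed to the CCP via scatterlist. */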
	memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
	sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);

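	/* Build the command descriptor for the CCP XTS-AES engine. */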
	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
					   : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.xts.unit_size = unit_size;
	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.xts.iv = &rctx->iv_sg;
	rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.xts.src = req->src;
	rctx->cmd.u.xts.src_len = req->nbytes;
	rctx->cmd.u.xts.dst = req->dst;

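	/* Queue the command; completion is signalled asynchronously
	 * through ccp_aes_xts_complete().
	 */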
	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 1);
}

static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 0);
}

static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *fallback_tfm;

	ctx->complete = ccp_aes_xts_complete;
	ctx->u.aes.key_len = 0;

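	/* Allocate a software implementation of the same algorithm to
	 * fall back on, masking out candidates that are asynchronous or
	 * themselves need a fallback.
	 */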
	fallback_tfm = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), 0,
					       CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("could not load fallback driver %s\n",
			crypto_tfm_alg_name(tfm));
		return PTR_ERR(fallback_tfm);
	}
	ctx->u.aes.tfm_ablkcipher = fallback_tfm;

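	/* Reserve request space for our context plus whatever the
	 * fallback cipher requires.
	 */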
	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx) +
				      fallback_tfm->base.crt_ablkcipher.reqsize;

	return 0;
}

static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.aes.tfm_ablkcipher)
		crypto_free_ablkcipher(ctx->u.aes.tfm_ablkcipher);
	ctx->u.aes.tfm_ablkcipher = NULL;
}

static int ccp_register_aes_xts_alg(struct list_head *head,
				    const struct ccp_aes_xts_def *def)
{
	struct ccp_crypto_ablkcipher_alg *ccp_alg;
	struct crypto_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	alg = &ccp_alg->alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_KERN_DRIVER_ONLY |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_blocksize = AES_BLOCK_SIZE;
	alg->cra_ctxsize = sizeof(struct ccp_ctx);
	alg->cra_priority = CCP_CRA_PRIORITY;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
	alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
	alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
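	/* XTS keys are double length: two full AES keys back to back. */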
	alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
	alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
	alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
	alg->cra_init = ccp_aes_xts_cra_init;
	alg->cra_exit = ccp_aes_xts_cra_exit;
	alg->cra_module = THIS_MODULE;

	ret = crypto_register_alg(alg);
	if (ret) {
		pr_err("%s ablkcipher algorithm registration error (%d)\n",
		       alg->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

int ccp_register_aes_xts_algs(struct list_head *head)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
		ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}