/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"

static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
module_param_named(allhwsupport, allhwsupport, int, 0444);
MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)");

struct mv_cesa_dev *cesa_dev;

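/*
 * Fetch the next request from the global queue and start it on @engine.
 * The "unlocked" suffix refers to the engine lock, which both callers
 * hold when invoking this helper; the global queue lock is taken (and
 * released) here.
 */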
static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req, *backlog;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&cesa_dev->lock);
	backlog = crypto_get_backlog(&cesa_dev->queue);
	req = crypto_dequeue_request(&cesa_dev->queue);
	engine->req = req;
	spin_unlock_bh(&cesa_dev->lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->prepare(req, engine);
	ctx->ops->step(req);
}

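/*
 * Threaded interrupt handler: acknowledge and clear the engine
 * interrupts, then either step the in-flight request to its next
 * operation chunk or, once it has fully completed, signal completion
 * and pull a new request off the global queue.
 */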
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);

		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		ret = IRQ_HANDLED;
		spin_lock_bh(&engine->lock);
		req = engine->req;
		spin_unlock_bh(&engine->lock);
		if (req) {
			ctx = crypto_tfm_ctx(req->tfm);
			res = ctx->ops->process(req, status & mask);
			if (res != -EINPROGRESS) {
				spin_lock_bh(&engine->lock);
				engine->req = NULL;
				mv_cesa_dequeue_req_unlocked(engine);
				spin_unlock_bh(&engine->lock);
				ctx->ops->cleanup(req);
				local_bh_disable();
				req->complete(req, res);
				local_bh_enable();
			} else {
				ctx->ops->step(req);
			}
		}
	}

	return ret;
}

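/*
 * Enqueue @req on the global CESA queue and kick any idle engine so it
 * starts processing immediately. Returns -EINPROGRESS once the request
 * has been accepted, or whatever crypto_enqueue_request() returned
 * otherwise (e.g. -EBUSY when the queue is full).
 */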
int mv_cesa_queue_req(struct crypto_async_request *req)
{
	int ret;
	int i;

	spin_lock_bh(&cesa_dev->lock);
	ret = crypto_enqueue_request(&cesa_dev->queue, req);
	spin_unlock_bh(&cesa_dev->lock);

	if (ret != -EINPROGRESS)
		return ret;

	for (i = 0; i < cesa_dev->caps->nengines; i++) {
		spin_lock_bh(&cesa_dev->engines[i].lock);
		if (!cesa_dev->engines[i].req)
			mv_cesa_dequeue_req_unlocked(&cesa_dev->engines[i]);
		spin_unlock_bh(&cesa_dev->engines[i].lock);
	}

	return -EINPROGRESS;
}

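/*
 * Register every cipher and ahash algorithm listed in the device
 * capabilities. On failure, unregister whatever was already registered
 * before propagating the error.
 */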
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;

err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(cesa->caps->cipher_algs[j]);

	return ret;
}

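/* Unregister all algorithms registered by mv_cesa_add_algs(). */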
static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_alg(cesa->caps->cipher_algs[i]);
}

static struct crypto_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

static struct crypto_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};

static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);

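/*
 * Program the TDMA address decoding windows: disable all four windows
 * first, then open one window per DRAM chip select so the TDMA engine
 * can reach every DRAM range described by the mbus layer.
 */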
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}

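/*
 * Create the managed DMA pools (TDMA descriptors, operation contexts,
 * cache and padding buffers) used by the TDMA path. A no-op on
 * platforms without TDMA support.
 */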
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					sizeof(struct mv_cesa_tdma_desc),
					16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;

	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;

	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;

	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;
}

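/*
 * Attach SRAM to the engine: prefer a region from the
 * "marvell,crypto-srams" genalloc pool, and fall back to a plain
 * platform memory resource ("sram", or "sram0"/"sram1" on
 * multi-engine platforms).
 */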
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	const char *res_name = "sram";
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram = gen_pool_dma_alloc(engine->pool,
						  cesa->sram_size,
						  &engine->sram_dma);
		if (engine->sram)
			return 0;

		engine->pool = NULL;
		return -ENOMEM;
	}

	if (cesa->caps->nengines > 1) {
		if (!idx)
			res_name = "sram0";
		else
			res_name = "sram1";
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   res_name);
	if (!res || resource_size(res) < cesa->sram_size)
		return -EINVAL;

	engine->sram = devm_ioremap_resource(cesa->dev, res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = phys_to_dma(cesa->dev,
				       (phys_addr_t)res->start);

	return 0;
}

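/*
 * Release SRAM obtained from the genalloc pool; SRAM mapped through
 * devm_ioremap_resource() is released automatically.
 */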
static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	if (!engine->pool)
		return;

	gen_pool_free(engine->pool, (unsigned long)engine->sram,
		      cesa->sram_size);
}

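/*
 * Probe: map the registers, create the DMA pools, then bring up each
 * engine (SRAM, clocks, MBus windows, IRQ) before finally registering
 * the algorithms with the crypto API.
 */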
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	struct resource *res;
	int irq, ret, i;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
		return -ENOTSUPP;

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);
	crypto_init_queue(&cesa->queue, 50);
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	cesa->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[7];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%d", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		ret = clk_prepare_enable(engine->clk);
		if (ret) {
			/* Never enabled: keep err_cleanup from disabling it. */
			engine->clk = NULL;
			goto err_cleanup;
		}

		ret = clk_prepare_enable(engine->zclk);
		if (ret) {
			/* Never enabled: keep err_cleanup from disabling it. */
			engine->zclk = NULL;
			goto err_cleanup;
		}

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(&cesa->engines[i], dram);

		writel(0, cesa->engines[i].regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       cesa->engines[i].regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       cesa->engines[i].regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						&cesa->engines[i]);
		if (ret)
			goto err_cleanup;
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return ret;
}


static int mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return 0;
}

static struct platform_driver marvell_cesa = {
	.probe		= mv_cesa_probe,
	.remove		= mv_cesa_remove,
	.driver		= {
		.name	= "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");