/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"

#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
#define CFB64_BLOCK_SIZE	8

/* AES flags */
#define AES_FLAGS_MODE_MASK	0x03ff
#define AES_FLAGS_ENCRYPT	BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CFB		BIT(2)
#define AES_FLAGS_CFB8		BIT(3)
#define AES_FLAGS_CFB16		BIT(4)
#define AES_FLAGS_CFB32		BIT(5)
#define AES_FLAGS_CFB64		BIT(6)
#define AES_FLAGS_CFB128	BIT(7)
#define AES_FLAGS_OFB		BIT(8)
#define AES_FLAGS_CTR		BIT(9)

#define AES_FLAGS_INIT		BIT(16)
#define AES_FLAGS_DMA		BIT(17)
#define AES_FLAGS_BUSY		BIT(18)
#define AES_FLAGS_FAST		BIT(19)

#define ATMEL_AES_QUEUE_LENGTH	50

#define ATMEL_AES_DMA_THRESHOLD		16


struct atmel_aes_caps {
	bool	has_dualbuff;
	bool	has_cfb64;
	u32		max_burst_size;
};

struct atmel_aes_dev;

struct atmel_aes_ctx {
	struct atmel_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];

	u16		block_size;
};

struct atmel_aes_reqctx {
	unsigned long mode;
};

struct atmel_aes_dma {
	struct dma_chan			*chan;
	struct dma_slave_config dma_conf;
};

struct atmel_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct atmel_aes_ctx	*ctx;
	struct device		*dev;
	struct clk		*iclk;
	int	irq;

	unsigned long		flags;
	int	err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t	total;

	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	size_t				in_offset;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;
	size_t				out_offset;

	size_t	bufcnt;
	size_t	buflen;
	size_t	dma_size;

	void	*buf_in;
	int		dma_in;
	dma_addr_t	dma_addr_in;
	struct atmel_aes_dma	dma_lch_in;

	void	*buf_out;
	int		dma_out;
	dma_addr_t	dma_addr_out;
	struct atmel_aes_dma	dma_lch_out;

	struct atmel_aes_caps	caps;

	u32	hw_version;
};

struct atmel_aes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};

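/*
 * Count how many scatterlist entries are needed to cover the request
 * payload.  Walking stops early if the scatterlist ends before all
 * req->nbytes have been accounted for.
 */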
static int atmel_aes_sg_length(struct ablkcipher_request *req,
			struct scatterlist *sg)
{
	unsigned int total = req->nbytes;
	int sg_nb;
	unsigned int len;
	struct scatterlist *sg_list;

	sg_nb = 0;
	sg_list = sg;

	while (total) {
		len = min(sg_list->length, total);

		sg_nb++;
		total -= len;

		sg_list = sg_next(sg_list);
		if (!sg_list)
			total = 0;
	}

	return sg_nb;
}

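/*
 * Copy up to @buflen bytes between the scatterlist and a linear buffer,
 * advancing *sg and *offset as entries are consumed.  @out selects the
 * direction (0: scatterlist to buffer, 1: buffer to scatterlist).
 * Returns the number of bytes actually copied.
 */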
static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
			void *buf, size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}

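/*
 * Bind a tfm context to an AES device.  The first device on the global
 * list is picked on first use and then reused for all later requests of
 * the same context.
 */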
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd = NULL;
	struct atmel_aes_dev *tmp;

	spin_lock_bh(&atmel_aes.lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
			aes_dd = tmp;
			break;
		}
		ctx->dd = aes_dd;
	} else {
		aes_dd = ctx->dd;
	}

	spin_unlock_bh(&atmel_aes.lock);

	return aes_dd;
}

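/*
 * Enable the peripheral clock and, on first use, soft-reset the engine
 * and write the initial Mode Register value.
 */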
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
		dd->flags |= AES_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	atmel_aes_hw_init(dd);

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);
}

static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	clk_disable_unprepare(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

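/*
 * Queue one in/out DMA transfer pair of @length bytes.  The slave bus
 * width and burst size are adjusted to the active CFB variant before the
 * descriptors are submitted; completion of the output channel schedules
 * the done tasklet.
 */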
static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
		dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
	struct scatterlist sg[2];
	struct dma_async_tx_descriptor	*in_desc, *out_desc;

	dd->dma_size = length;

	dma_sync_single_for_device(dd->dev, dma_addr_in, length,
				   DMA_TO_DEVICE);
	dma_sync_single_for_device(dd->dev, dma_addr_out, length,
				   DMA_FROM_DEVICE);

	if (dd->flags & AES_FLAGS_CFB8) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else if (dd->flags & AES_FLAGS_CFB16) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
			AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
		dd->dma_lch_in.dma_conf.src_maxburst = 1;
		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
		dd->dma_lch_out.dma_conf.src_maxburst = 1;
		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
	} else {
		dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
		dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	}

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= AES_FLAGS_DMA;

	sg_init_table(&sg[0], 1);
	sg_dma_address(&sg[0]) = dma_addr_in;
	sg_dma_len(&sg[0]) = length;

	sg_init_table(&sg[1], 1);
	sg_dma_address(&sg[1]) = dma_addr_out;
	sg_dma_len(&sg[1]) = length;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
				1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		return -EINVAL;

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
				1, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		return -EINVAL;

	out_desc->callback = atmel_aes_dma_callback;
	out_desc->callback_param = dd;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;
}

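/*
 * PIO fallback for small requests (at most ATMEL_AES_DMA_THRESHOLD
 * bytes): the source scatterlist is copied into the bounce buffer and
 * written to the input data registers, with the data-ready interrupt
 * signalling completion.
 */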
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
{
	dd->flags &= ~AES_FLAGS_DMA;

	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
				dd->dma_size, DMA_TO_DEVICE);
	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

	/* use cache buffers */
	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
	if (!dd->nb_in_sg)
		return -EINVAL;

	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
	if (!dd->nb_out_sg)
		return -EINVAL;

	dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
					dd->buf_in, dd->total);

	if (!dd->bufcnt)
		return -EINVAL;

	dd->total -= dd->bufcnt;

	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
				dd->bufcnt >> 2);

	return 0;
}

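/*
 * Start the next DMA chunk.  When both scatterlists are suitably aligned
 * and their current segments have equal length, the hardware works
 * directly on them (fast path); otherwise the data is staged through the
 * pre-mapped bounce buffers.
 */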
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;

	if ((!dd->in_offset) && (!dd->out_offset)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
		fast = in && out;

		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
			fast = 0;
	}

	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1,
				DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1,
				DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		dd->flags |= AES_FLAGS_FAST;

	} else {
		dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
					dd->dma_size, DMA_TO_DEVICE);

		/* use cache buffers */
		count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
				dd->buf_in, dd->buflen, dd->total, 0);

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~AES_FLAGS_FAST;
	}

	dd->total -= count;

	err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);

	if (err && (dd->flags & AES_FLAGS_FAST)) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}

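/*
 * Program the Mode Register (key size, operating mode, CFB segment size,
 * cipher direction and start mode), then load the key and, for the modes
 * that need one, the IV.
 */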
static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
{
	int err;
	u32 valcr = 0, valmr = 0;

	err = atmel_aes_hw_init(dd);
	if (err)
		return err;

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (dd->ctx->keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	if (dd->flags & AES_FLAGS_CBC) {
		valmr |= AES_MR_OPMOD_CBC;
	} else if (dd->flags & AES_FLAGS_CFB) {
		valmr |= AES_MR_OPMOD_CFB;
		if (dd->flags & AES_FLAGS_CFB8)
			valmr |= AES_MR_CFBS_8b;
		else if (dd->flags & AES_FLAGS_CFB16)
			valmr |= AES_MR_CFBS_16b;
		else if (dd->flags & AES_FLAGS_CFB32)
			valmr |= AES_MR_CFBS_32b;
		else if (dd->flags & AES_FLAGS_CFB64)
			valmr |= AES_MR_CFBS_64b;
		else if (dd->flags & AES_FLAGS_CFB128)
			valmr |= AES_MR_CFBS_128b;
	} else if (dd->flags & AES_FLAGS_OFB) {
		valmr |= AES_MR_OPMOD_OFB;
	} else if (dd->flags & AES_FLAGS_CTR) {
		valmr |= AES_MR_OPMOD_CTR;
	} else {
		valmr |= AES_MR_OPMOD_ECB;
	}

	if (dd->flags & AES_FLAGS_ENCRYPT)
		valmr |= AES_MR_CYPHER_ENC;

	if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_CR, valcr);
	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
						dd->ctx->keylen >> 2);

	if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
	   (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
	   dd->req->info) {
		atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
	}

	return 0;
}

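/*
 * Enqueue @req (may be NULL) and, if the device is idle, pop the next
 * request off the queue, program the hardware and kick off the transfer
 * either through DMA or through the PIO path.
 */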
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_aes_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= AES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;
	ctx->dd = dd;

	err = atmel_aes_write_ctrl(dd);
	if (!err) {
		if (dd->total > ATMEL_AES_DMA_THRESHOLD)
			err = atmel_aes_crypt_dma_start(dd);
		else
			err = atmel_aes_crypt_cpu_start(dd);
	}
	if (err) {
		/* aes_task will not finish it, so do it here */
		atmel_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}

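/*
 * Finalize the DMA chunk that just completed: unmap the scatterlists on
 * the fast path, or copy the bounce buffer back into the destination
 * scatterlist on the slow path.
 */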
static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
	int err = -EINVAL;
	size_t count;

	if (dd->flags & AES_FLAGS_DMA) {
		err = 0;
		if (dd->flags & AES_FLAGS_FAST) {
			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		} else {
			dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
				dd->dma_size, DMA_FROM_DEVICE);

			/* copy data */
			count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
				dd->buf_out, dd->buflen, dd->dma_size, 1);
			if (count != dd->dma_size) {
				err = -EINVAL;
				pr_err("not all data converted: %zu\n", count);
			}
		}
	}

	return err;
}

static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	int err = -ENOMEM;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
	dd->buflen = PAGE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* map the pre-allocated bounce buffers for DMA */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
					dd->buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
					dd->buflen, DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	return 0;

err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
err_map_in:
err_alloc:
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
		DMA_TO_DEVICE);
	free_page((unsigned long)dd->buf_out);
	free_page((unsigned long)dd->buf_in);
}

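/*
 * Common entry point of all ablkcipher operations: validate that the
 * request length is a multiple of the block size of the selected mode,
 * record the mode flags and hand the request to the device queue.
 */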
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct atmel_aes_dev *dd;

	if (mode & AES_FLAGS_CFB8) {
		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
			pr_err("request size is not an exact number of CFB8 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB8_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB16) {
		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
			pr_err("request size is not an exact number of CFB16 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB16_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB32) {
		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
			pr_err("request size is not an exact number of CFB32 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB32_BLOCK_SIZE;
	} else if (mode & AES_FLAGS_CFB64) {
		if (!IS_ALIGNED(req->nbytes, CFB64_BLOCK_SIZE)) {
			pr_err("request size is not an exact number of CFB64 blocks\n");
			return -EINVAL;
		}
		ctx->block_size = CFB64_BLOCK_SIZE;
	} else {
		if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
			pr_err("request size is not an exact number of AES blocks\n");
			return -EINVAL;
		}
		ctx->block_size = AES_BLOCK_SIZE;
	}

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return atmel_aes_handle_queue(dd, req);
}

static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave	*sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

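/*
 * Request the two slave DMA channels (memory-to-peripheral towards
 * IDATAR, peripheral-to-memory from ODATAR) and fill in their default
 * slave configuration.
 */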
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
	struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Try to grab 2 DMA channels */
	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan)
		goto err_dma_in;

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		AES_IDATAR(0);
	dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
			atmel_aes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
	if (!dd->dma_lch_out.chan)
		goto err_dma_out;

	dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
		AES_ODATAR(0);
	dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
	dd->dma_lch_out.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_out.dma_conf.device_fc = false;

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return err;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}

static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		   keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		0);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_CTR);
}

static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);

	return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "atmel-ecb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ecb_encrypt,
		.decrypt	= atmel_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "atmel-cbc-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cbc_encrypt,
		.decrypt	= atmel_aes_cbc_decrypt,
	}
},
{
	.cra_name		= "ofb(aes)",
	.cra_driver_name	= "atmel-ofb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ofb_encrypt,
		.decrypt	= atmel_aes_ofb_decrypt,
	}
},
{
	.cra_name		= "cfb(aes)",
	.cra_driver_name	= "atmel-cfb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb_encrypt,
		.decrypt	= atmel_aes_cfb_decrypt,
	}
},
{
	.cra_name		= "cfb32(aes)",
	.cra_driver_name	= "atmel-cfb32-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB32_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x3,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb32_encrypt,
		.decrypt	= atmel_aes_cfb32_decrypt,
	}
},
{
	.cra_name		= "cfb16(aes)",
	.cra_driver_name	= "atmel-cfb16-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB16_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x1,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb16_encrypt,
		.decrypt	= atmel_aes_cfb16_decrypt,
	}
},
{
	.cra_name		= "cfb8(aes)",
	.cra_driver_name	= "atmel-cfb8-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB8_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb8_encrypt,
		.decrypt	= atmel_aes_cfb8_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "atmel-ctr-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ctr_encrypt,
		.decrypt	= atmel_aes_ctr_decrypt,
	}
},
};
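/*
 * The transformations above are reached through the generic crypto API.
 * A minimal sketch of how a consumer would use them (illustrative only,
 * error handling omitted):
 *
 *	struct crypto_ablkcipher *tfm;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	...
 *	crypto_free_ablkcipher(tfm);
 */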

static struct crypto_alg aes_cfb64_alg = {
	.cra_name		= "cfb64(aes)",
	.cra_driver_name	= "atmel-cfb64-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB64_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x7,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb64_encrypt,
		.decrypt	= atmel_aes_cfb64_decrypt,
	}
};

static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}

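/*
 * Bottom half run after a data-ready interrupt or a DMA completion:
 * either read the result back over PIO, or finalize the completed DMA
 * chunk and start the next one until the whole request is processed.
 */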
static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
	int err;

	if (!(dd->flags & AES_FLAGS_DMA)) {
		atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
				dd->bufcnt >> 2);

		if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
			dd->buf_out, dd->bufcnt))
			err = 0;
		else
			err = -EINVAL;

		goto cpu_end;
	}

	err = atmel_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		if (dd->flags & AES_FLAGS_FAST) {
			dd->in_sg = sg_next(dd->in_sg);
			dd->out_sg = sg_next(dd->out_sg);
			if (!dd->in_sg || !dd->out_sg)
				err = -EINVAL;
		}
		if (!err)
			err = atmel_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

cpu_end:
	atmel_aes_finish_req(dd, err);
	atmel_aes_handle_queue(dd, NULL);
}

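/*
 * Interrupt handler: disable the interrupt sources that fired and defer
 * the actual processing to the done tasklet.
 */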
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
	if (dd->caps.has_cfb64)
		crypto_unregister_alg(&aes_cfb64_alg);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_cfb64) {
		err = crypto_register_alg(&aes_cfb64_alg);
		if (err)
			goto err_aes_cfb64_alg;
	}

	return 0;

err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

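/*
 * Derive the feature set (dual buffer support, CFB64, maximum DMA burst
 * size) from the major part of the hardware version register.
 */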
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	dd->caps.has_dualbuff = 0;
	dd->caps.has_cfb64 = 0;
	dd->caps.max_burst_size = 1;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x200:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x120:
		break;
	default:
		dev_warn(dd->dev,
				"Unmanaged aes version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		devm_kfree(&pdev->dev, pdata);
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	return ERR_PTR(-EINVAL);
}
#endif

static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		pdata = atmel_aes_of_init(pdev);
		if (IS_ERR(pdata)) {
			err = PTR_ERR(pdata);
			goto aes_dd_err;
		}
	}

	if (!pdata->dma_slave) {
		err = -ENXIO;
		goto aes_dd_err;
	}

	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
	if (aes_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto aes_dd_err;
	}

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);
	spin_lock_init(&aes_dd->lock);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
					(unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
					(unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	aes_dd->irq = -1;

	/* Get the base address */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	aes_dd->phys_base = aes_res->start;

	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = aes_dd->irq;
		goto res_err;
	}

	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
			       IRQF_SHARED, "atmel-aes", aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto res_err;
	}

	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
	if (IS_ERR(aes_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(aes_dd->io_base);
		goto res_err;
	}

	atmel_aes_hw_version_init(aes_dd);

	atmel_aes_get_cap(aes_dd);

	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_aes_buff;

	err = atmel_aes_dma_init(aes_dd, pdata);
	if (err)
		goto err_aes_dma;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
			dma_chan_name(aes_dd->dma_lch_in.chan),
			dma_chan_name(aes_dd->dma_lch_out.chan));

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
	atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
res_err:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
aes_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);
	if (!aes_dd)
		return -ENODEV;
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);

	return 0;
}

static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= atmel_aes_remove,
	.driver		= {
		.name	= "atmel_aes",
		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");