1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/mod_devicetable.h>
31#include <linux/device.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h>
34#include <linux/hw_random.h>
35#include <linux/of_address.h>
36#include <linux/of_irq.h>
37#include <linux/of_platform.h>
38#include <linux/dma-mapping.h>
39#include <linux/io.h>
40#include <linux/spinlock.h>
41#include <linux/rtnetlink.h>
42#include <linux/slab.h>
43
44#include <crypto/algapi.h>
45#include <crypto/aes.h>
46#include <crypto/des.h>
47#include <crypto/sha.h>
48#include <crypto/md5.h>
49#include <crypto/aead.h>
50#include <crypto/authenc.h>
51#include <crypto/skcipher.h>
52#include <crypto/hash.h>
53#include <crypto/internal/hash.h>
54#include <crypto/scatterwalk.h>
55
56#include "talitos.h"
57
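/*
 * The SEC uses 36-bit bus addresses: the low 32 bits of a dma address go
 * in ptr (big-endian) and the high bits in the extended-pointer field
 * eptr.  Illustrative example: dma_addr 0x2_3456_7890 is stored as
 * eptr = 0x2, ptr = cpu_to_be32(0x34567890).
 */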
58static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
59{
60	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
61	talitos_ptr->eptr = upper_32_bits(dma_addr);
62}
63
64/*
65 * map virtual single (contiguous) pointer to h/w descriptor pointer
66 */
67static void map_single_talitos_ptr(struct device *dev,
68				   struct talitos_ptr *talitos_ptr,
69				   unsigned short len, void *data,
70				   unsigned char extent,
71				   enum dma_data_direction dir)
72{
73	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
74
75	talitos_ptr->len = cpu_to_be16(len);
76	to_talitos_ptr(talitos_ptr, dma_addr);
77	talitos_ptr->j_extent = extent;
78}
79
80/*
81 * unmap bus single (contiguous) h/w descriptor pointer
82 */
83static void unmap_single_talitos_ptr(struct device *dev,
84				     struct talitos_ptr *talitos_ptr,
85				     enum dma_data_direction dir)
86{
87	dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
88			 be16_to_cpu(talitos_ptr->len), dir);
89}
90
91static int reset_channel(struct device *dev, int ch)
92{
93	struct talitos_private *priv = dev_get_drvdata(dev);
94	unsigned int timeout = TALITOS_TIMEOUT;
95
96	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);
97
98	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
99	       && --timeout)
100		cpu_relax();
101
102	if (timeout == 0) {
103		dev_err(dev, "failed to reset channel %d\n", ch);
104		return -EIO;
105	}
106
107	/* set 36-bit addressing, done writeback enable and done IRQ enable */
108	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
109		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
110
111	/* and ICCR writeback, if available */
112	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
113		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
114		          TALITOS_CCCR_LO_IWSE);
115
116	return 0;
117}
118
119static int reset_device(struct device *dev)
120{
121	struct talitos_private *priv = dev_get_drvdata(dev);
122	unsigned int timeout = TALITOS_TIMEOUT;
123	u32 mcr = TALITOS_MCR_SWR;
124
125	setbits32(priv->reg + TALITOS_MCR, mcr);
126
127	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
128	       && --timeout)
129		cpu_relax();
130
131	if (priv->irq[1]) {
132		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
133		setbits32(priv->reg + TALITOS_MCR, mcr);
134	}
135
136	if (timeout == 0) {
137		dev_err(dev, "failed to reset device\n");
138		return -EIO;
139	}
140
141	return 0;
142}
143
144/*
145 * Reset and initialize the device
146 */
147static int init_device(struct device *dev)
148{
149	struct talitos_private *priv = dev_get_drvdata(dev);
150	int ch, err;
151
152	/*
153	 * Master reset
154	 * errata documentation: warning: certain SEC interrupts
155	 * are not fully cleared by writing the MCR:SWR bit,
156	 * set bit twice to completely reset
157	 */
158	err = reset_device(dev);
159	if (err)
160		return err;
161
162	err = reset_device(dev);
163	if (err)
164		return err;
165
166	/* reset channels */
167	for (ch = 0; ch < priv->num_channels; ch++) {
168		err = reset_channel(dev, ch);
169		if (err)
170			return err;
171	}
172
173	/* enable channel done and error interrupts */
174	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
175	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
176
177	/* disable integrity check error interrupts (use writeback instead) */
178	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
179		setbits32(priv->reg + TALITOS_MDEUICR_LO,
180		          TALITOS_MDEUICR_LO_ICE);
181
182	return 0;
183}
184
185/**
186 * talitos_submit - submits a descriptor to the device for processing
187 * @dev:	the SEC device to be used
188 * @ch:		the SEC device channel to be used
189 * @desc:	the descriptor to be processed by the device
190 * @callback:	whom to call when processing is complete
191 * @context:	a handle for use by caller (optional)
192 *
193 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check both err and the feedback field in the descriptor
 * header for the device's processing status.
196 */
197int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
198		   void (*callback)(struct device *dev,
199				    struct talitos_desc *desc,
200				    void *context, int error),
201		   void *context)
202{
203	struct talitos_private *priv = dev_get_drvdata(dev);
204	struct talitos_request *request;
205	unsigned long flags;
206	int head;
207
208	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
209
210	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
211		/* h/w fifo is full */
212		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
213		return -EAGAIN;
214	}
215
216	head = priv->chan[ch].head;
217	request = &priv->chan[ch].fifo[head];
218
219	/* map descriptor and save caller data */
220	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
221					   DMA_BIDIRECTIONAL);
222	request->callback = callback;
223	request->context = context;
224
225	/* increment fifo head */
226	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
227
228	smp_wmb();
229	request->desc = desc;
230
231	/* GO! */
232	wmb();
233	out_be32(priv->chan[ch].reg + TALITOS_FF,
234		 upper_32_bits(request->dma_desc));
235	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
236		 lower_32_bits(request->dma_desc));
237
238	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
239
240	return -EINPROGRESS;
241}
242EXPORT_SYMBOL(talitos_submit);
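
/*
 * Example caller (an illustrative sketch only; my_done and my_req are
 * hypothetical names, and descriptor setup/unmapping is elided):
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		struct my_req *r = context;
 *		// check error and the desc->hdr feedback, then complete r
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, r);
 *	if (err != -EINPROGRESS)
 *		// -EAGAIN means the channel fifo was full
 */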
243
/*
 * process completed requests for a channel; pass any error status
 * along to the callback
 */
247static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
248{
249	struct talitos_private *priv = dev_get_drvdata(dev);
250	struct talitos_request *request, saved_req;
251	unsigned long flags;
252	int tail, status;
253
254	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
255
256	tail = priv->chan[ch].tail;
257	while (priv->chan[ch].fifo[tail].desc) {
258		request = &priv->chan[ch].fifo[tail];
259
260		/* descriptors with their done bits set don't get the error */
261		rmb();
262		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
263			status = 0;
264		else
265			if (!error)
266				break;
267			else
268				status = error;
269
270		dma_unmap_single(dev, request->dma_desc,
271				 sizeof(struct talitos_desc),
272				 DMA_BIDIRECTIONAL);
273
274		/* copy entries so we can call callback outside lock */
275		saved_req.desc = request->desc;
276		saved_req.callback = request->callback;
277		saved_req.context = request->context;
278
279		/* release request entry in fifo */
280		smp_wmb();
281		request->desc = NULL;
282
283		/* increment fifo tail */
284		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
285
286		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
287
288		atomic_dec(&priv->chan[ch].submit_count);
289
290		saved_req.callback(dev, saved_req.desc, saved_req.context,
291				   status);
292		/* channel may resume processing in single desc error case */
293		if (error && !reset_ch && status == error)
294			return;
295		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
296		tail = priv->chan[ch].tail;
297	}
298
299	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
300}
301
302/*
303 * process completed requests for channels that have done status
304 */
305#define DEF_TALITOS_DONE(name, ch_done_mask)				\
306static void talitos_done_##name(unsigned long data)			\
307{									\
308	struct device *dev = (struct device *)data;			\
309	struct talitos_private *priv = dev_get_drvdata(dev);		\
310	unsigned long flags;						\
311									\
312	if (ch_done_mask & 1)						\
313		flush_channel(dev, 0, 0, 0);				\
314	if (priv->num_channels == 1)					\
315		goto out;						\
316	if (ch_done_mask & (1 << 2))					\
317		flush_channel(dev, 1, 0, 0);				\
318	if (ch_done_mask & (1 << 4))					\
319		flush_channel(dev, 2, 0, 0);				\
320	if (ch_done_mask & (1 << 6))					\
321		flush_channel(dev, 3, 0, 0);				\
322									\
323out:									\
324	/* At this point, all completed channels have been processed */	\
325	/* Unmask done interrupts for channels completed later on. */	\
326	spin_lock_irqsave(&priv->reg_lock, flags);			\
327	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
328	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
329	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
330}
331DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
332DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
333DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
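
/*
 * Channel ch owns a pair of bits in the ISR: bit 2*ch signals "done"
 * (handled by the tasklets above) and bit 2*ch + 1 signals an error
 * (handled by talitos_error() below).
 */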
334
335/*
336 * locate current (offending) descriptor
337 */
338static u32 current_desc_hdr(struct device *dev, int ch)
339{
340	struct talitos_private *priv = dev_get_drvdata(dev);
341	int tail, iter;
342	dma_addr_t cur_desc;
343
344	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
345	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
346
347	if (!cur_desc) {
348		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
349		return 0;
350	}
351
352	tail = priv->chan[ch].tail;
353
354	iter = tail;
355	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
356		iter = (iter + 1) & (priv->fifo_len - 1);
357		if (iter == tail) {
358			dev_err(dev, "couldn't locate current descriptor\n");
359			return 0;
360		}
361	}
362
363	return priv->chan[ch].fifo[iter].desc->hdr;
364}
365
366/*
367 * user diagnostics; report root cause of error based on execution unit status
368 */
369static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
370{
371	struct talitos_private *priv = dev_get_drvdata(dev);
372	int i;
373
374	if (!desc_hdr)
375		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
376
377	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
378	case DESC_HDR_SEL0_AFEU:
379		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
380			in_be32(priv->reg + TALITOS_AFEUISR),
381			in_be32(priv->reg + TALITOS_AFEUISR_LO));
382		break;
383	case DESC_HDR_SEL0_DEU:
384		dev_err(dev, "DEUISR 0x%08x_%08x\n",
385			in_be32(priv->reg + TALITOS_DEUISR),
386			in_be32(priv->reg + TALITOS_DEUISR_LO));
387		break;
388	case DESC_HDR_SEL0_MDEUA:
389	case DESC_HDR_SEL0_MDEUB:
390		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
391			in_be32(priv->reg + TALITOS_MDEUISR),
392			in_be32(priv->reg + TALITOS_MDEUISR_LO));
393		break;
394	case DESC_HDR_SEL0_RNG:
395		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
396			in_be32(priv->reg + TALITOS_RNGUISR),
397			in_be32(priv->reg + TALITOS_RNGUISR_LO));
398		break;
399	case DESC_HDR_SEL0_PKEU:
400		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
401			in_be32(priv->reg + TALITOS_PKEUISR),
402			in_be32(priv->reg + TALITOS_PKEUISR_LO));
403		break;
404	case DESC_HDR_SEL0_AESU:
405		dev_err(dev, "AESUISR 0x%08x_%08x\n",
406			in_be32(priv->reg + TALITOS_AESUISR),
407			in_be32(priv->reg + TALITOS_AESUISR_LO));
408		break;
409	case DESC_HDR_SEL0_CRCU:
410		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
411			in_be32(priv->reg + TALITOS_CRCUISR),
412			in_be32(priv->reg + TALITOS_CRCUISR_LO));
413		break;
414	case DESC_HDR_SEL0_KEU:
415		dev_err(dev, "KEUISR 0x%08x_%08x\n",
416			in_be32(priv->reg + TALITOS_KEUISR),
417			in_be32(priv->reg + TALITOS_KEUISR_LO));
418		break;
419	}
420
421	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
422	case DESC_HDR_SEL1_MDEUA:
423	case DESC_HDR_SEL1_MDEUB:
424		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
425			in_be32(priv->reg + TALITOS_MDEUISR),
426			in_be32(priv->reg + TALITOS_MDEUISR_LO));
427		break;
428	case DESC_HDR_SEL1_CRCU:
429		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
430			in_be32(priv->reg + TALITOS_CRCUISR),
431			in_be32(priv->reg + TALITOS_CRCUISR_LO));
432		break;
433	}
434
435	for (i = 0; i < 8; i++)
436		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
437			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
438			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
439}
440
441/*
442 * recover from error interrupts
443 */
444static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
445{
446	struct talitos_private *priv = dev_get_drvdata(dev);
447	unsigned int timeout = TALITOS_TIMEOUT;
448	int ch, error, reset_dev = 0, reset_ch = 0;
449	u32 v, v_lo;
450
451	for (ch = 0; ch < priv->num_channels; ch++) {
452		/* skip channels without errors */
453		if (!(isr & (1 << (ch * 2 + 1))))
454			continue;
455
456		error = -EINVAL;
457
458		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
459		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
460
461		if (v_lo & TALITOS_CCPSR_LO_DOF) {
462			dev_err(dev, "double fetch fifo overflow error\n");
463			error = -EAGAIN;
464			reset_ch = 1;
465		}
466		if (v_lo & TALITOS_CCPSR_LO_SOF) {
467			/* h/w dropped descriptor */
468			dev_err(dev, "single fetch fifo overflow error\n");
469			error = -EAGAIN;
470		}
471		if (v_lo & TALITOS_CCPSR_LO_MDTE)
472			dev_err(dev, "master data transfer error\n");
473		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
474			dev_err(dev, "s/g data length zero error\n");
475		if (v_lo & TALITOS_CCPSR_LO_FPZ)
476			dev_err(dev, "fetch pointer zero error\n");
477		if (v_lo & TALITOS_CCPSR_LO_IDH)
478			dev_err(dev, "illegal descriptor header error\n");
479		if (v_lo & TALITOS_CCPSR_LO_IEU)
480			dev_err(dev, "invalid execution unit error\n");
481		if (v_lo & TALITOS_CCPSR_LO_EU)
482			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
483		if (v_lo & TALITOS_CCPSR_LO_GB)
484			dev_err(dev, "gather boundary error\n");
485		if (v_lo & TALITOS_CCPSR_LO_GRL)
486			dev_err(dev, "gather return/length error\n");
487		if (v_lo & TALITOS_CCPSR_LO_SB)
488			dev_err(dev, "scatter boundary error\n");
489		if (v_lo & TALITOS_CCPSR_LO_SRL)
490			dev_err(dev, "scatter return/length error\n");
491
492		flush_channel(dev, ch, error, reset_ch);
493
494		if (reset_ch) {
495			reset_channel(dev, ch);
496		} else {
497			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
498				  TALITOS_CCCR_CONT);
499			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
500			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
501			       TALITOS_CCCR_CONT) && --timeout)
502				cpu_relax();
503			if (timeout == 0) {
504				dev_err(dev, "failed to restart channel %d\n",
505					ch);
506				reset_dev = 1;
507			}
508		}
509	}
510	if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal timeout, or rngu error: "
512		        "ISR 0x%08x_%08x\n", isr, isr_lo);
513
514		/* purge request queues */
515		for (ch = 0; ch < priv->num_channels; ch++)
516			flush_channel(dev, ch, -EIO, 1);
517
518		/* reset and reinitialize the device */
519		init_device(dev);
520	}
521}
522
523#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
524static irqreturn_t talitos_interrupt_##name(int irq, void *data)	       \
525{									       \
526	struct device *dev = data;					       \
527	struct talitos_private *priv = dev_get_drvdata(dev);		       \
528	u32 isr, isr_lo;						       \
529	unsigned long flags;						       \
530									       \
531	spin_lock_irqsave(&priv->reg_lock, flags);			       \
532	isr = in_be32(priv->reg + TALITOS_ISR);				       \
533	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
534	/* Acknowledge interrupt */					       \
535	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
536	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
537									       \
538	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
539		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
540		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
541	}								       \
542	else {								       \
543		if (likely(isr & ch_done_mask)) {			       \
544			/* mask further done interrupts. */		       \
545			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
546			/* done_task will unmask done interrupts at exit */    \
547			tasklet_schedule(&priv->done_task[tlet]);	       \
548		}							       \
549		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
550	}								       \
551									       \
552	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
553								IRQ_NONE;      \
554}
555DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
556DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
557DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
558
559/*
560 * hwrng
561 */
562static int talitos_rng_data_present(struct hwrng *rng, int wait)
563{
564	struct device *dev = (struct device *)rng->priv;
565	struct talitos_private *priv = dev_get_drvdata(dev);
566	u32 ofl;
567	int i;
568
569	for (i = 0; i < 20; i++) {
570		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
571		      TALITOS_RNGUSR_LO_OFL;
572		if (ofl || !wait)
573			break;
574		udelay(10);
575	}
576
577	return !!ofl;
578}
579
580static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
581{
582	struct device *dev = (struct device *)rng->priv;
583	struct talitos_private *priv = dev_get_drvdata(dev);
584
	/*
	 * rng fifo requires 64-bit accesses: the high-word read only
	 * satisfies that requirement; the low word read next is returned
	 */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
588
589	return sizeof(u32);
590}
591
592static int talitos_rng_init(struct hwrng *rng)
593{
594	struct device *dev = (struct device *)rng->priv;
595	struct talitos_private *priv = dev_get_drvdata(dev);
596	unsigned int timeout = TALITOS_TIMEOUT;
597
598	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
599	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
600	       && --timeout)
601		cpu_relax();
602	if (timeout == 0) {
603		dev_err(dev, "failed to reset rng hw\n");
604		return -ENODEV;
605	}
606
607	/* start generating */
608	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
609
610	return 0;
611}
612
613static int talitos_register_rng(struct device *dev)
614{
615	struct talitos_private *priv = dev_get_drvdata(dev);
616
	priv->rng.name		= dev_driver_string(dev);
	priv->rng.init		= talitos_rng_init;
	priv->rng.data_present	= talitos_rng_data_present;
	priv->rng.data_read	= talitos_rng_data_read;
	priv->rng.priv		= (unsigned long)dev;
622
623	return hwrng_register(&priv->rng);
624}
625
626static void talitos_unregister_rng(struct device *dev)
627{
628	struct talitos_private *priv = dev_get_drvdata(dev);
629
630	hwrng_unregister(&priv->rng);
631}
632
633/*
634 * crypto alg
635 */
636#define TALITOS_CRA_PRIORITY		3000
637#define TALITOS_MAX_KEY_SIZE		96
638#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
639
640struct talitos_ctx {
641	struct device *dev;
642	int ch;
643	__be32 desc_hdr_template;
644	u8 key[TALITOS_MAX_KEY_SIZE];
645	u8 iv[TALITOS_MAX_IV_LENGTH];
646	unsigned int keylen;
647	unsigned int enckeylen;
648	unsigned int authkeylen;
649	unsigned int authsize;
650};
651
652#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
653#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
654
655struct talitos_ahash_req_ctx {
656	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
657	unsigned int hw_context_size;
658	u8 buf[HASH_MAX_BLOCK_SIZE];
659	u8 bufnext[HASH_MAX_BLOCK_SIZE];
660	unsigned int swinit;
661	unsigned int first;
662	unsigned int last;
663	unsigned int to_hash_later;
664	u64 nbuf;
665	struct scatterlist bufsl[2];
666	struct scatterlist *psrc;
667};
668
669static int aead_setauthsize(struct crypto_aead *authenc,
670			    unsigned int authsize)
671{
672	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
673
674	ctx->authsize = authsize;
675
676	return 0;
677}
678
679static int aead_setkey(struct crypto_aead *authenc,
680		       const u8 *key, unsigned int keylen)
681{
682	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
683	struct crypto_authenc_keys keys;
684
685	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
686		goto badkey;
687
688	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
689		goto badkey;
690
691	memcpy(ctx->key, keys.authkey, keys.authkeylen);
692	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
693
694	ctx->keylen = keys.authkeylen + keys.enckeylen;
695	ctx->enckeylen = keys.enckeylen;
696	ctx->authkeylen = keys.authkeylen;
697
698	return 0;
699
700badkey:
701	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
702	return -EINVAL;
703}
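
/*
 * ctx->key now holds the authentication key immediately followed by the
 * encryption key; ipsec_esp() points the hmac key pointer at the first
 * authkeylen bytes and the cipher key pointer at the enckeylen bytes
 * that follow.
 */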
704
705/*
706 * talitos_edesc - s/w-extended descriptor
707 * @assoc_nents: number of segments in associated data scatterlist
708 * @src_nents: number of segments in input scatterlist
709 * @dst_nents: number of segments in output scatterlist
710 * @assoc_chained: whether assoc is chained or not
711 * @src_chained: whether src is chained or not
712 * @dst_chained: whether dst is chained or not
713 * @iv_dma: dma address of iv for checking continuity and link table
714 * @dma_len: length of dma mapped link_tbl space
715 * @dma_link_tbl: bus physical address of link_tbl
716 * @desc: h/w descriptor
717 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
718 *
719 * if decrypting (with authcheck), or either one of src_nents or dst_nents
720 * is greater than 1, an integrity check value is concatenated to the end
721 * of link_tbl data
722 */
723struct talitos_edesc {
724	int assoc_nents;
725	int src_nents;
726	int dst_nents;
727	bool assoc_chained;
728	bool src_chained;
729	bool dst_chained;
730	dma_addr_t iv_dma;
731	int dma_len;
732	dma_addr_t dma_link_tbl;
733	struct talitos_desc desc;
734	struct talitos_ptr link_tbl[0];
735};
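
/*
 * The whole edesc is allocated in one shot: the base struct is followed
 * by (src_nents + dst_nents + 2 + assoc_nents) link table entries plus
 * authsize bytes of ICV scratch space; see talitos_edesc_alloc() below.
 */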
736
737static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
738			  unsigned int nents, enum dma_data_direction dir,
739			  bool chained)
740{
741	if (unlikely(chained))
742		while (sg) {
743			dma_map_sg(dev, sg, 1, dir);
744			sg = sg_next(sg);
745		}
746	else
747		dma_map_sg(dev, sg, nents, dir);
748	return nents;
749}
750
751static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
752				   enum dma_data_direction dir)
753{
754	while (sg) {
755		dma_unmap_sg(dev, sg, 1, dir);
756		sg = sg_next(sg);
757	}
758}
759
760static void talitos_sg_unmap(struct device *dev,
761			     struct talitos_edesc *edesc,
762			     struct scatterlist *src,
763			     struct scatterlist *dst)
764{
765	unsigned int src_nents = edesc->src_nents ? : 1;
766	unsigned int dst_nents = edesc->dst_nents ? : 1;
767
768	if (src != dst) {
769		if (edesc->src_chained)
770			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
771		else
772			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
773
774		if (dst) {
775			if (edesc->dst_chained)
776				talitos_unmap_sg_chain(dev, dst,
777						       DMA_FROM_DEVICE);
778			else
779				dma_unmap_sg(dev, dst, dst_nents,
780					     DMA_FROM_DEVICE);
781		}
782	} else
783		if (edesc->src_chained)
784			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
785		else
786			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
787}
788
789static void ipsec_esp_unmap(struct device *dev,
790			    struct talitos_edesc *edesc,
791			    struct aead_request *areq)
792{
793	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
794	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
795	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
796	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
797
798	if (edesc->assoc_chained)
799		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
800	else if (areq->assoclen)
		/* in the non-contiguous case assoc_nents also counts the IV */
802		dma_unmap_sg(dev, areq->assoc,
803			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
804			     DMA_TO_DEVICE);
805
806	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
807
808	if (edesc->dma_len)
809		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
810				 DMA_BIDIRECTIONAL);
811}
812
813/*
814 * ipsec_esp descriptor callbacks
815 */
816static void ipsec_esp_encrypt_done(struct device *dev,
817				   struct talitos_desc *desc, void *context,
818				   int err)
819{
820	struct aead_request *areq = context;
821	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
822	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
823	struct talitos_edesc *edesc;
824	struct scatterlist *sg;
825	void *icvdata;
826
827	edesc = container_of(desc, struct talitos_edesc, desc);
828
829	ipsec_esp_unmap(dev, edesc, areq);
830
831	/* copy the generated ICV to dst */
832	if (edesc->dst_nents) {
833		icvdata = &edesc->link_tbl[edesc->src_nents +
834					   edesc->dst_nents + 2 +
835					   edesc->assoc_nents];
836		sg = sg_last(areq->dst, edesc->dst_nents);
837		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
838		       icvdata, ctx->authsize);
839	}
840
841	kfree(edesc);
842
843	aead_request_complete(areq, err);
844}
845
846static void ipsec_esp_decrypt_swauth_done(struct device *dev,
847					  struct talitos_desc *desc,
848					  void *context, int err)
849{
850	struct aead_request *req = context;
851	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
852	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
853	struct talitos_edesc *edesc;
854	struct scatterlist *sg;
855	void *icvdata;
856
857	edesc = container_of(desc, struct talitos_edesc, desc);
858
859	ipsec_esp_unmap(dev, edesc, req);
860
861	if (!err) {
862		/* auth check */
863		if (edesc->dma_len)
864			icvdata = &edesc->link_tbl[edesc->src_nents +
865						   edesc->dst_nents + 2 +
866						   edesc->assoc_nents];
867		else
868			icvdata = &edesc->link_tbl[0];
869
870		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
871		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
872			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
873	}
874
875	kfree(edesc);
876
877	aead_request_complete(req, err);
878}
879
880static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
881					  struct talitos_desc *desc,
882					  void *context, int err)
883{
884	struct aead_request *req = context;
885	struct talitos_edesc *edesc;
886
887	edesc = container_of(desc, struct talitos_edesc, desc);
888
889	ipsec_esp_unmap(dev, edesc, req);
890
891	/* check ICV auth status */
892	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
893		     DESC_HDR_LO_ICCR1_PASS))
894		err = -EBADMSG;
895
896	kfree(edesc);
897
898	aead_request_complete(req, err);
899}
900
901/*
902 * convert scatterlist to SEC h/w link table format
903 * stop at cryptlen bytes
904 */
905static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
906			   int cryptlen, struct talitos_ptr *link_tbl_ptr)
907{
908	int n_sg = sg_count;
909
910	while (n_sg--) {
911		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
912		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
913		link_tbl_ptr->j_extent = 0;
914		link_tbl_ptr++;
915		cryptlen -= sg_dma_len(sg);
916		sg = sg_next(sg);
917	}
918
	/* adjust (decrease) the last one (or two) entries' len to cryptlen */
920	link_tbl_ptr--;
921	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
922		/* Empty this entry, and move to previous one */
923		cryptlen += be16_to_cpu(link_tbl_ptr->len);
924		link_tbl_ptr->len = 0;
925		sg_count--;
926		link_tbl_ptr--;
927	}
928	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
929					+ cryptlen);
930
931	/* tag end of link table */
932	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
933
934	return sg_count;
935}
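
/*
 * Worked example: two 1500-byte segments with cryptlen 2000 first yield
 * entries of 1500 and 1500; the adjustment above then trims the last
 * entry to 500 so the table totals exactly cryptlen.
 */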
936
937/*
938 * fill in and submit ipsec_esp descriptor
939 */
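/*
 * Descriptor pointer usage for this descriptor type:
 * ptr[0] hmac key, ptr[1] hmac/assoc data (plus iv), ptr[2] cipher iv,
 * ptr[3] cipher key, ptr[4] cipher in, ptr[5] cipher out (plus icv),
 * ptr[6] iv out.
 */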
940static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
941		     u64 seq, void (*callback) (struct device *dev,
942						struct talitos_desc *desc,
943						void *context, int error))
944{
945	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
946	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
947	struct device *dev = ctx->dev;
948	struct talitos_desc *desc = &edesc->desc;
949	unsigned int cryptlen = areq->cryptlen;
950	unsigned int authsize = ctx->authsize;
951	unsigned int ivsize = crypto_aead_ivsize(aead);
952	int sg_count, ret;
953	int sg_link_tbl_len;
954
955	/* hmac key */
956	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
957			       0, DMA_TO_DEVICE);
958
959	/* hmac data */
960	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
961	if (edesc->assoc_nents) {
962		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
963		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
964
965		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
966			       sizeof(struct talitos_ptr));
967		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
968
969		/* assoc_nents - 1 entries for assoc, 1 for IV */
970		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
971					  areq->assoclen, tbl_ptr);
972
973		/* add IV to link table */
974		tbl_ptr += sg_count - 1;
975		tbl_ptr->j_extent = 0;
976		tbl_ptr++;
977		to_talitos_ptr(tbl_ptr, edesc->iv_dma);
978		tbl_ptr->len = cpu_to_be16(ivsize);
979		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
980
981		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
982					   edesc->dma_len, DMA_BIDIRECTIONAL);
983	} else {
984		if (areq->assoclen)
985			to_talitos_ptr(&desc->ptr[1],
986				       sg_dma_address(areq->assoc));
987		else
988			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
989		desc->ptr[1].j_extent = 0;
990	}
991
992	/* cipher iv */
993	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma);
994	desc->ptr[2].len = cpu_to_be16(ivsize);
995	desc->ptr[2].j_extent = 0;
996	/* Sync needed for the aead_givencrypt case */
997	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
998
999	/* cipher key */
1000	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1001			       (char *)&ctx->key + ctx->authkeylen, 0,
1002			       DMA_TO_DEVICE);
1003
	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is the number of HMAC bytes appended to the ciphertext,
	 * typically 12 for ipsec
	 */
1010	desc->ptr[4].len = cpu_to_be16(cryptlen);
1011	desc->ptr[4].j_extent = authsize;
1012
1013	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1014				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1015							   : DMA_TO_DEVICE,
1016				  edesc->src_chained);
1017
1018	if (sg_count == 1) {
1019		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
1020	} else {
1021		sg_link_tbl_len = cryptlen;
1022
1023		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1024			sg_link_tbl_len = cryptlen + authsize;
1025
1026		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
1027					  &edesc->link_tbl[0]);
1028		if (sg_count > 1) {
1029			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1030			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
1031			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1032						   edesc->dma_len,
1033						   DMA_BIDIRECTIONAL);
1034		} else {
1035			/* Only one segment now, so no link tbl needed */
1036			to_talitos_ptr(&desc->ptr[4],
1037				       sg_dma_address(areq->src));
1038		}
1039	}
1040
1041	/* cipher out */
1042	desc->ptr[5].len = cpu_to_be16(cryptlen);
1043	desc->ptr[5].j_extent = authsize;
1044
1045	if (areq->src != areq->dst)
1046		sg_count = talitos_map_sg(dev, areq->dst,
1047					  edesc->dst_nents ? : 1,
1048					  DMA_FROM_DEVICE, edesc->dst_chained);
1049
1050	if (sg_count == 1) {
1051		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
1052	} else {
1053		int tbl_off = edesc->src_nents + 1;
1054		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1055
1056		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1057			       tbl_off * sizeof(struct talitos_ptr));
1058		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1059					  tbl_ptr);
1060
1061		/* Add an entry to the link table for ICV data */
1062		tbl_ptr += sg_count - 1;
1063		tbl_ptr->j_extent = 0;
1064		tbl_ptr++;
1065		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1066		tbl_ptr->len = cpu_to_be16(authsize);
1067
1068		/* icv data follows link tables */
1069		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
1070			       (tbl_off + edesc->dst_nents + 1 +
1071				edesc->assoc_nents) *
1072			       sizeof(struct talitos_ptr));
1073		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1074		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1075					   edesc->dma_len, DMA_BIDIRECTIONAL);
1076	}
1077
1078	/* iv out */
1079	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
1080			       DMA_FROM_DEVICE);
1081
1082	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1083	if (ret != -EINPROGRESS) {
1084		ipsec_esp_unmap(dev, edesc, areq);
1085		kfree(edesc);
1086	}
1087	return ret;
1088}
1089
1090/*
1091 * derive number of elements in scatterlist
1092 */
1093static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
1094{
1095	struct scatterlist *sg = sg_list;
1096	int sg_nents = 0;
1097
1098	*chained = false;
1099	while (nbytes > 0) {
1100		sg_nents++;
1101		nbytes -= sg->length;
1102		if (!sg_is_last(sg) && (sg + 1)->length == 0)
1103			*chained = true;
1104		sg = sg_next(sg);
1105	}
1106
1107	return sg_nents;
1108}
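
/*
 * A chained scatterlist is flagged above when a zero-length entry
 * follows a non-last one; talitos_map_sg() then maps such a list one
 * entry at a time rather than passing the whole array to dma_map_sg().
 */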
1109
1110/*
1111 * allocate and map the extended descriptor
1112 */
1113static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1114						 struct scatterlist *assoc,
1115						 struct scatterlist *src,
1116						 struct scatterlist *dst,
1117						 u8 *iv,
1118						 unsigned int assoclen,
1119						 unsigned int cryptlen,
1120						 unsigned int authsize,
1121						 unsigned int ivsize,
1122						 int icv_stashing,
1123						 u32 cryptoflags,
1124						 bool encrypt)
1125{
1126	struct talitos_edesc *edesc;
1127	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
1128	bool assoc_chained = false, src_chained = false, dst_chained = false;
1129	dma_addr_t iv_dma = 0;
1130	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1131		      GFP_ATOMIC;
1132
1133	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1134		dev_err(dev, "length exceeds h/w max limit\n");
1135		return ERR_PTR(-EINVAL);
1136	}
1137
1138	if (ivsize)
1139		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1140
1141	if (assoclen) {
1142		/*
1143		 * Currently it is assumed that iv is provided whenever assoc
1144		 * is.
1145		 */
1146		BUG_ON(!iv);
1147
1148		assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
1149		talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
1150			       assoc_chained);
1151		assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
1152
1153		if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
1154			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
1155	}
1156
1157	if (!dst || dst == src) {
1158		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1159		src_nents = (src_nents == 1) ? 0 : src_nents;
1160		dst_nents = dst ? src_nents : 0;
1161	} else { /* dst && dst != src*/
1162		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
1163				     &src_chained);
1164		src_nents = (src_nents == 1) ? 0 : src_nents;
1165		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
1166				     &dst_chained);
1167		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1168	}
1169
1170	/*
1171	 * allocate space for base edesc plus the link tables,
1172	 * allowing for two separate entries for ICV and generated ICV (+ 2),
1173	 * and the ICV data itself
1174	 */
1175	alloc_len = sizeof(struct talitos_edesc);
1176	if (assoc_nents || src_nents || dst_nents) {
1177		dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
1178			  sizeof(struct talitos_ptr) + authsize;
1179		alloc_len += dma_len;
1180	} else {
1181		dma_len = 0;
1182		alloc_len += icv_stashing ? authsize : 0;
1183	}
1184
1185	edesc = kmalloc(alloc_len, GFP_DMA | flags);
1186	if (!edesc) {
1187		if (assoc_chained)
1188			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
1189		else if (assoclen)
1190			dma_unmap_sg(dev, assoc,
1191				     assoc_nents ? assoc_nents - 1 : 1,
1192				     DMA_TO_DEVICE);
1193
1194		if (iv_dma)
1195			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1196
1197		dev_err(dev, "could not allocate edescriptor\n");
1198		return ERR_PTR(-ENOMEM);
1199	}
1200
1201	edesc->assoc_nents = assoc_nents;
1202	edesc->src_nents = src_nents;
1203	edesc->dst_nents = dst_nents;
1204	edesc->assoc_chained = assoc_chained;
1205	edesc->src_chained = src_chained;
1206	edesc->dst_chained = dst_chained;
1207	edesc->iv_dma = iv_dma;
1208	edesc->dma_len = dma_len;
1209	if (dma_len)
1210		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1211						     edesc->dma_len,
1212						     DMA_BIDIRECTIONAL);
1213
1214	return edesc;
1215}
1216
1217static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1218					      int icv_stashing, bool encrypt)
1219{
1220	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1221	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1222	unsigned int ivsize = crypto_aead_ivsize(authenc);
1223
1224	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
1225				   iv, areq->assoclen, areq->cryptlen,
1226				   ctx->authsize, ivsize, icv_stashing,
1227				   areq->base.flags, encrypt);
1228}
1229
1230static int aead_encrypt(struct aead_request *req)
1231{
1232	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1233	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1234	struct talitos_edesc *edesc;
1235
1236	/* allocate extended descriptor */
1237	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1238	if (IS_ERR(edesc))
1239		return PTR_ERR(edesc);
1240
1241	/* set encrypt */
1242	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1243
1244	return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
1245}
1246
1247static int aead_decrypt(struct aead_request *req)
1248{
1249	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1250	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1251	unsigned int authsize = ctx->authsize;
1252	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1253	struct talitos_edesc *edesc;
1254	struct scatterlist *sg;
1255	void *icvdata;
1256
1257	req->cryptlen -= authsize;
1258
1259	/* allocate extended descriptor */
1260	edesc = aead_edesc_alloc(req, req->iv, 1, false);
1261	if (IS_ERR(edesc))
1262		return PTR_ERR(edesc);
1263
1264	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1265	    ((!edesc->src_nents && !edesc->dst_nents) ||
1266	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1267
1268		/* decrypt and check the ICV */
1269		edesc->desc.hdr = ctx->desc_hdr_template |
1270				  DESC_HDR_DIR_INBOUND |
1271				  DESC_HDR_MODE1_MDEU_CICV;
1272
1273		/* reset integrity check result bits */
1274		edesc->desc.hdr_lo = 0;
1275
1276		return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
1277	}
1278
1279	/* Have to check the ICV with software */
1280	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1281
1282	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1283	if (edesc->dma_len)
1284		icvdata = &edesc->link_tbl[edesc->src_nents +
1285					   edesc->dst_nents + 2 +
1286					   edesc->assoc_nents];
1287	else
1288		icvdata = &edesc->link_tbl[0];
1289
1290	sg = sg_last(req->src, edesc->src_nents ? : 1);
1291
1292	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1293	       ctx->authsize);
1294
1295	return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
1296}
1297
1298static int aead_givencrypt(struct aead_givcrypt_request *req)
1299{
1300	struct aead_request *areq = &req->areq;
1301	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1302	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1303	struct talitos_edesc *edesc;
1304
1305	/* allocate extended descriptor */
1306	edesc = aead_edesc_alloc(areq, req->giv, 0, true);
1307	if (IS_ERR(edesc))
1308		return PTR_ERR(edesc);
1309
1310	/* set encrypt */
1311	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1312
1313	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1314	/* avoid consecutive packets going out with same IV */
1315	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1316
1317	return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
1318}
1319
1320static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1321			     const u8 *key, unsigned int keylen)
1322{
1323	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1324
1325	memcpy(&ctx->key, key, keylen);
1326	ctx->keylen = keylen;
1327
1328	return 0;
1329}
1330
1331static void common_nonsnoop_unmap(struct device *dev,
1332				  struct talitos_edesc *edesc,
1333				  struct ablkcipher_request *areq)
1334{
1335	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1336	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1337	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1338
1339	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
1340
1341	if (edesc->dma_len)
1342		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1343				 DMA_BIDIRECTIONAL);
1344}
1345
1346static void ablkcipher_done(struct device *dev,
1347			    struct talitos_desc *desc, void *context,
1348			    int err)
1349{
1350	struct ablkcipher_request *areq = context;
1351	struct talitos_edesc *edesc;
1352
1353	edesc = container_of(desc, struct talitos_edesc, desc);
1354
1355	common_nonsnoop_unmap(dev, edesc, areq);
1356
1357	kfree(edesc);
1358
1359	areq->base.complete(&areq->base, err);
1360}
1361
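/*
 * Descriptor pointer usage for the ablkcipher descriptor type:
 * ptr[0] empty, ptr[1] cipher iv, ptr[2] cipher key, ptr[3] cipher in,
 * ptr[4] cipher out, ptr[5] iv out, ptr[6] empty.
 */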
1362static int common_nonsnoop(struct talitos_edesc *edesc,
1363			   struct ablkcipher_request *areq,
1364			   void (*callback) (struct device *dev,
1365					     struct talitos_desc *desc,
1366					     void *context, int error))
1367{
1368	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1369	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1370	struct device *dev = ctx->dev;
1371	struct talitos_desc *desc = &edesc->desc;
1372	unsigned int cryptlen = areq->nbytes;
1373	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1374	int sg_count, ret;
1375
1376	/* first DWORD empty */
1377	desc->ptr[0].len = 0;
1378	to_talitos_ptr(&desc->ptr[0], 0);
1379	desc->ptr[0].j_extent = 0;
1380
1381	/* cipher iv */
1382	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
1383	desc->ptr[1].len = cpu_to_be16(ivsize);
1384	desc->ptr[1].j_extent = 0;
1385
1386	/* cipher key */
1387	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1388			       (char *)&ctx->key, 0, DMA_TO_DEVICE);
1389
1390	/*
1391	 * cipher in
1392	 */
1393	desc->ptr[3].len = cpu_to_be16(cryptlen);
1394	desc->ptr[3].j_extent = 0;
1395
1396	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1397				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1398							   : DMA_TO_DEVICE,
1399				  edesc->src_chained);
1400
1401	if (sg_count == 1) {
1402		to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
1403	} else {
1404		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
1405					  &edesc->link_tbl[0]);
1406		if (sg_count > 1) {
1407			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1408			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1409			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1410						   edesc->dma_len,
1411						   DMA_BIDIRECTIONAL);
1412		} else {
1413			/* Only one segment now, so no link tbl needed */
1414			to_talitos_ptr(&desc->ptr[3],
1415				       sg_dma_address(areq->src));
1416		}
1417	}
1418
1419	/* cipher out */
1420	desc->ptr[4].len = cpu_to_be16(cryptlen);
1421	desc->ptr[4].j_extent = 0;
1422
1423	if (areq->src != areq->dst)
1424		sg_count = talitos_map_sg(dev, areq->dst,
1425					  edesc->dst_nents ? : 1,
1426					  DMA_FROM_DEVICE, edesc->dst_chained);
1427
1428	if (sg_count == 1) {
1429		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
1430	} else {
1431		struct talitos_ptr *link_tbl_ptr =
1432			&edesc->link_tbl[edesc->src_nents + 1];
1433
1434		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1435					      (edesc->src_nents + 1) *
1436					      sizeof(struct talitos_ptr));
1437		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1438		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1439					  link_tbl_ptr);
1440		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1441					   edesc->dma_len, DMA_BIDIRECTIONAL);
1442	}
1443
1444	/* iv out */
1445	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
1446			       DMA_FROM_DEVICE);
1447
1448	/* last DWORD empty */
1449	desc->ptr[6].len = 0;
1450	to_talitos_ptr(&desc->ptr[6], 0);
1451	desc->ptr[6].j_extent = 0;
1452
1453	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1454	if (ret != -EINPROGRESS) {
1455		common_nonsnoop_unmap(dev, edesc, areq);
1456		kfree(edesc);
1457	}
1458	return ret;
1459}
1460
1461static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1462						    areq, bool encrypt)
1463{
1464	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1465	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1466	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1467
1468	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
1469				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
1470				   areq->base.flags, encrypt);
1471}
1472
1473static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1474{
1475	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1476	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1477	struct talitos_edesc *edesc;
1478
1479	/* allocate extended descriptor */
1480	edesc = ablkcipher_edesc_alloc(areq, true);
1481	if (IS_ERR(edesc))
1482		return PTR_ERR(edesc);
1483
1484	/* set encrypt */
1485	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1486
1487	return common_nonsnoop(edesc, areq, ablkcipher_done);
1488}
1489
1490static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1491{
1492	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1493	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1494	struct talitos_edesc *edesc;
1495
1496	/* allocate extended descriptor */
1497	edesc = ablkcipher_edesc_alloc(areq, false);
1498	if (IS_ERR(edesc))
1499		return PTR_ERR(edesc);
1500
1501	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1502
1503	return common_nonsnoop(edesc, areq, ablkcipher_done);
1504}
1505
1506static void common_nonsnoop_hash_unmap(struct device *dev,
1507				       struct talitos_edesc *edesc,
1508				       struct ahash_request *areq)
1509{
1510	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1511
1512	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1513
1514	/* When using hashctx-in, must unmap it. */
1515	if (edesc->desc.ptr[1].len)
1516		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1517					 DMA_TO_DEVICE);
1518
1519	if (edesc->desc.ptr[2].len)
1520		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1521					 DMA_TO_DEVICE);
1522
1523	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
1524
1525	if (edesc->dma_len)
1526		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1527				 DMA_BIDIRECTIONAL);
1528
1529}
1530
1531static void ahash_done(struct device *dev,
1532		       struct talitos_desc *desc, void *context,
1533		       int err)
1534{
1535	struct ahash_request *areq = context;
1536	struct talitos_edesc *edesc =
1537		 container_of(desc, struct talitos_edesc, desc);
1538	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1539
1540	if (!req_ctx->last && req_ctx->to_hash_later) {
1541		/* Position any partial block for next update/final/finup */
1542		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1543		req_ctx->nbuf = req_ctx->to_hash_later;
1544	}
1545	common_nonsnoop_hash_unmap(dev, edesc, areq);
1546
1547	kfree(edesc);
1548
1549	areq->base.complete(&areq->base, err);
1550}
1551
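/*
 * Descriptor pointer usage for the hash descriptor type:
 * ptr[0] empty, ptr[1] hash context in (if any), ptr[2] HMAC key (if any),
 * ptr[3] data in, ptr[4] empty, ptr[5] hash/HMAC or context out,
 * ptr[6] empty.
 */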
1552static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1553				struct ahash_request *areq, unsigned int length,
1554				void (*callback) (struct device *dev,
1555						  struct talitos_desc *desc,
1556						  void *context, int error))
1557{
1558	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1559	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1560	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1561	struct device *dev = ctx->dev;
1562	struct talitos_desc *desc = &edesc->desc;
1563	int sg_count, ret;
1564
1565	/* first DWORD empty */
1566	desc->ptr[0] = zero_entry;
1567
1568	/* hash context in */
1569	if (!req_ctx->first || req_ctx->swinit) {
1570		map_single_talitos_ptr(dev, &desc->ptr[1],
1571				       req_ctx->hw_context_size,
1572				       (char *)req_ctx->hw_context, 0,
1573				       DMA_TO_DEVICE);
1574		req_ctx->swinit = 0;
1575	} else {
1576		desc->ptr[1] = zero_entry;
1577		/* Indicate next op is not the first. */
1578		req_ctx->first = 0;
1579	}
1580
1581	/* HMAC key */
1582	if (ctx->keylen)
1583		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1584				       (char *)&ctx->key, 0, DMA_TO_DEVICE);
1585	else
1586		desc->ptr[2] = zero_entry;
1587
1588	/*
1589	 * data in
1590	 */
1591	desc->ptr[3].len = cpu_to_be16(length);
1592	desc->ptr[3].j_extent = 0;
1593
1594	sg_count = talitos_map_sg(dev, req_ctx->psrc,
1595				  edesc->src_nents ? : 1,
1596				  DMA_TO_DEVICE, edesc->src_chained);
1597
1598	if (sg_count == 1) {
1599		to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
1600	} else {
1601		sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
1602					  &edesc->link_tbl[0]);
1603		if (sg_count > 1) {
1604			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1605			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1606			dma_sync_single_for_device(ctx->dev,
1607						   edesc->dma_link_tbl,
1608						   edesc->dma_len,
1609						   DMA_BIDIRECTIONAL);
1610		} else {
1611			/* Only one segment now, so no link tbl needed */
1612			to_talitos_ptr(&desc->ptr[3],
1613				       sg_dma_address(req_ctx->psrc));
1614		}
1615	}
1616
1617	/* fifth DWORD empty */
1618	desc->ptr[4] = zero_entry;
1619
1620	/* hash/HMAC out -or- hash context out */
1621	if (req_ctx->last)
1622		map_single_talitos_ptr(dev, &desc->ptr[5],
1623				       crypto_ahash_digestsize(tfm),
1624				       areq->result, 0, DMA_FROM_DEVICE);
1625	else
1626		map_single_talitos_ptr(dev, &desc->ptr[5],
1627				       req_ctx->hw_context_size,
1628				       req_ctx->hw_context, 0, DMA_FROM_DEVICE);
1629
1630	/* last DWORD empty */
1631	desc->ptr[6] = zero_entry;
1632
1633	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1634	if (ret != -EINPROGRESS) {
1635		common_nonsnoop_hash_unmap(dev, edesc, areq);
1636		kfree(edesc);
1637	}
1638	return ret;
1639}
1640
1641static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1642					       unsigned int nbytes)
1643{
1644	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1645	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1646	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1647
1648	return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
1649				   nbytes, 0, 0, 0, areq->base.flags, false);
1650}
1651
1652static int ahash_init(struct ahash_request *areq)
1653{
1654	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1655	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1656
1657	/* Initialize the context */
1658	req_ctx->nbuf = 0;
1659	req_ctx->first = 1; /* first indicates h/w must init its context */
1660	req_ctx->swinit = 0; /* assume h/w init of context */
1661	req_ctx->hw_context_size =
1662		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1663			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1664			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1665
1666	return 0;
1667}
1668
1669/*
1670 * on h/w without explicit sha224 support, we initialize h/w context
1671 * manually with sha224 constants, and tell it to run sha256.
1672 */
1673static int ahash_init_sha224_swinit(struct ahash_request *areq)
1674{
1675	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1676
1677	ahash_init(areq);
	/* prevent the h/w from initializing the context with sha256 values */
	req_ctx->swinit = 1;
1679
1680	req_ctx->hw_context[0] = SHA224_H0;
1681	req_ctx->hw_context[1] = SHA224_H1;
1682	req_ctx->hw_context[2] = SHA224_H2;
1683	req_ctx->hw_context[3] = SHA224_H3;
1684	req_ctx->hw_context[4] = SHA224_H4;
1685	req_ctx->hw_context[5] = SHA224_H5;
1686	req_ctx->hw_context[6] = SHA224_H6;
1687	req_ctx->hw_context[7] = SHA224_H7;
1688
1689	/* init 64-bit count */
1690	req_ctx->hw_context[8] = 0;
1691	req_ctx->hw_context[9] = 0;
1692
1693	return 0;
1694}
1695
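/*
 * Buffering example (illustrative): with a 64-byte block size, 10 bytes
 * already buffered and a 100-byte update, nbytes_to_hash is 110, so one
 * full 64-byte block is hashed now and the remaining 46 bytes are kept
 * in bufnext for the next update/final/finup.
 */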
1696static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1697{
1698	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1699	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1700	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1701	struct talitos_edesc *edesc;
1702	unsigned int blocksize =
1703			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1704	unsigned int nbytes_to_hash;
1705	unsigned int to_hash_later;
1706	unsigned int nsg;
1707	bool chained;
1708
1709	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1710		/* Buffer up to one whole block */
1711		sg_copy_to_buffer(areq->src,
1712				  sg_count(areq->src, nbytes, &chained),
1713				  req_ctx->buf + req_ctx->nbuf, nbytes);
1714		req_ctx->nbuf += nbytes;
1715		return 0;
1716	}
1717
	/* Last request, or at least (blocksize + 1) bytes are available to hash */
1719	nbytes_to_hash = nbytes + req_ctx->nbuf;
1720	to_hash_later = nbytes_to_hash & (blocksize - 1);
1721
1722	if (req_ctx->last)
1723		to_hash_later = 0;
1724	else if (to_hash_later)
1725		/* There is a partial block. Hash the full block(s) now */
1726		nbytes_to_hash -= to_hash_later;
1727	else {
1728		/* Keep one block buffered */
1729		nbytes_to_hash -= blocksize;
1730		to_hash_later = blocksize;
1731	}
1732
1733	/* Chain in any previously buffered data */
1734	if (req_ctx->nbuf) {
1735		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1736		sg_init_table(req_ctx->bufsl, nsg);
1737		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1738		if (nsg > 1)
1739			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
1740		req_ctx->psrc = req_ctx->bufsl;
1741	} else
1742		req_ctx->psrc = areq->src;
1743
1744	if (to_hash_later) {
1745		int nents = sg_count(areq->src, nbytes, &chained);
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
1750	}
1751	req_ctx->to_hash_later = to_hash_later;
1752
1753	/* Allocate extended descriptor */
1754	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1755	if (IS_ERR(edesc))
1756		return PTR_ERR(edesc);
1757
1758	edesc->desc.hdr = ctx->desc_hdr_template;
1759
1760	/* On last one, request SEC to pad; otherwise continue */
1761	if (req_ctx->last)
1762		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1763	else
1764		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1765
1766	/* request SEC to INIT hash. */
1767	if (req_ctx->first && !req_ctx->swinit)
1768		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1769
1770	/* When the tfm context has a keylen, it's an HMAC.
1771	 * A first or last (i.e. not middle) descriptor must request HMAC.
1772	 */
1773	if (ctx->keylen && (req_ctx->first || req_ctx->last))
1774		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1775
1776	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1777				    ahash_done);
1778}
1779
1780static int ahash_update(struct ahash_request *areq)
1781{
1782	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1783
1784	req_ctx->last = 0;
1785
1786	return ahash_process_req(areq, areq->nbytes);
1787}
1788
1789static int ahash_final(struct ahash_request *areq)
1790{
1791	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1792
1793	req_ctx->last = 1;
1794
1795	return ahash_process_req(areq, 0);
1796}
1797
1798static int ahash_finup(struct ahash_request *areq)
1799{
1800	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1801
1802	req_ctx->last = 1;
1803
1804	return ahash_process_req(areq, areq->nbytes);
1805}
1806
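/*
 * one-shot digest: (re)initialize the hash state, then hash all of
 * areq->src as the final submission
 */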
1807static int ahash_digest(struct ahash_request *areq)
1808{
1809	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1810	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1811
1812	ahash->init(areq);
1813	req_ctx->last = 1;
1814
1815	return ahash_process_req(areq, areq->nbytes);
1816}
1817
1818struct keyhash_result {
1819	struct completion completion;
1820	int err;
1821};
1822
1823static void keyhash_complete(struct crypto_async_request *req, int err)
1824{
1825	struct keyhash_result *res = req->data;
1826
1827	if (err == -EINPROGRESS)
1828		return;
1829
1830	res->err = err;
1831	complete(&res->completion);
1832}
1833
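/*
 * hash a key longer than the block size down to the digest size, using
 * this same (async) tfm synchronously via a completion, so the result
 * can be used as the HMAC key
 */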
1834static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1835		   u8 *hash)
1836{
1837	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1838
1839	struct scatterlist sg[1];
1840	struct ahash_request *req;
1841	struct keyhash_result hresult;
1842	int ret;
1843
1844	init_completion(&hresult.completion);
1845
1846	req = ahash_request_alloc(tfm, GFP_KERNEL);
1847	if (!req)
1848		return -ENOMEM;
1849
1850	/* Keep tfm keylen == 0 during hash of the long key */
1851	ctx->keylen = 0;
1852	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1853				   keyhash_complete, &hresult);
1854
1855	sg_init_one(&sg[0], key, keylen);
1856
1857	ahash_request_set_crypt(req, sg, hash, keylen);
1858	ret = crypto_ahash_digest(req);
1859	switch (ret) {
1860	case 0:
1861		break;
1862	case -EINPROGRESS:
1863	case -EBUSY:
1864		ret = wait_for_completion_interruptible(
1865			&hresult.completion);
1866		if (!ret)
1867			ret = hresult.err;
1868		break;
1869	default:
1870		break;
1871	}
1872	ahash_request_free(req);
1873
1874	return ret;
1875}
1876
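/*
 * HMAC setkey: per RFC 2104, a key longer than the block size is first
 * hashed down to the digest size; the resulting key and its length are
 * saved in the tfm context for use by ahash_process_req()
 */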
1877static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1878			unsigned int keylen)
1879{
1880	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1881	unsigned int blocksize =
1882			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1883	unsigned int digestsize = crypto_ahash_digestsize(tfm);
1884	unsigned int keysize = keylen;
1885	u8 hash[SHA512_DIGEST_SIZE];
1886	int ret;
1887
1888	if (keylen <= blocksize)
1889		memcpy(ctx->key, key, keysize);
1890	else {
1891		/* Must get the hash of the long key */
1892		ret = keyhash(tfm, key, keylen, hash);
1893
1894		if (ret) {
1895			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1896			return -EINVAL;
1897		}
1898
1899		keysize = digestsize;
1900		memcpy(ctx->key, hash, digestsize);
1901	}
1902
1903	ctx->keylen = keysize;
1904
1905	return 0;
1906}
1907
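/*
 * one supported algorithm: the crypto API definition (cipher/aead or
 * ahash) together with the SEC descriptor header template that selects
 * the descriptor type and execution unit(s) for it
 */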
1909struct talitos_alg_template {
1910	u32 type;
1911	union {
1912		struct crypto_alg crypto;
1913		struct ahash_alg hash;
1914	} alg;
1915	__be32 desc_hdr_template;
1916};
1917
1918static struct talitos_alg_template driver_algs[] = {
1919	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
1920	{	.type = CRYPTO_ALG_TYPE_AEAD,
1921		.alg.crypto = {
1922			.cra_name = "authenc(hmac(sha1),cbc(aes))",
1923			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1924			.cra_blocksize = AES_BLOCK_SIZE,
1925			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1926			.cra_aead = {
1927				.ivsize = AES_BLOCK_SIZE,
1928				.maxauthsize = SHA1_DIGEST_SIZE,
1929			}
1930		},
1931		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1932			             DESC_HDR_SEL0_AESU |
1933		                     DESC_HDR_MODE0_AESU_CBC |
1934		                     DESC_HDR_SEL1_MDEUA |
1935		                     DESC_HDR_MODE1_MDEU_INIT |
1936		                     DESC_HDR_MODE1_MDEU_PAD |
1937		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1938	},
1939	{	.type = CRYPTO_ALG_TYPE_AEAD,
1940		.alg.crypto = {
1941			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1942			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1943			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1944			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1945			.cra_aead = {
1946				.ivsize = DES3_EDE_BLOCK_SIZE,
1947				.maxauthsize = SHA1_DIGEST_SIZE,
1948			}
1949		},
1950		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1951			             DESC_HDR_SEL0_DEU |
1952		                     DESC_HDR_MODE0_DEU_CBC |
1953		                     DESC_HDR_MODE0_DEU_3DES |
1954		                     DESC_HDR_SEL1_MDEUA |
1955		                     DESC_HDR_MODE1_MDEU_INIT |
1956		                     DESC_HDR_MODE1_MDEU_PAD |
1957		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1958	},
1959	{       .type = CRYPTO_ALG_TYPE_AEAD,
1960		.alg.crypto = {
1961			.cra_name = "authenc(hmac(sha224),cbc(aes))",
1962			.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
1963			.cra_blocksize = AES_BLOCK_SIZE,
1964			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1965			.cra_aead = {
1966				.ivsize = AES_BLOCK_SIZE,
1967				.maxauthsize = SHA224_DIGEST_SIZE,
1968			}
1969		},
1970		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1971				     DESC_HDR_SEL0_AESU |
1972				     DESC_HDR_MODE0_AESU_CBC |
1973				     DESC_HDR_SEL1_MDEUA |
1974				     DESC_HDR_MODE1_MDEU_INIT |
1975				     DESC_HDR_MODE1_MDEU_PAD |
1976				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
1977	},
1978	{	.type = CRYPTO_ALG_TYPE_AEAD,
1979		.alg.crypto = {
1980			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
1981			.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
1982			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1983			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1984			.cra_aead = {
1985				.ivsize = DES3_EDE_BLOCK_SIZE,
1986				.maxauthsize = SHA224_DIGEST_SIZE,
1987			}
1988		},
1989		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1990			             DESC_HDR_SEL0_DEU |
1991		                     DESC_HDR_MODE0_DEU_CBC |
1992		                     DESC_HDR_MODE0_DEU_3DES |
1993		                     DESC_HDR_SEL1_MDEUA |
1994		                     DESC_HDR_MODE1_MDEU_INIT |
1995		                     DESC_HDR_MODE1_MDEU_PAD |
1996		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
1997	},
1998	{	.type = CRYPTO_ALG_TYPE_AEAD,
1999		.alg.crypto = {
2000			.cra_name = "authenc(hmac(sha256),cbc(aes))",
2001			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
2002			.cra_blocksize = AES_BLOCK_SIZE,
2003			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2004			.cra_aead = {
2005				.ivsize = AES_BLOCK_SIZE,
2006				.maxauthsize = SHA256_DIGEST_SIZE,
2007			}
2008		},
2009		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2010			             DESC_HDR_SEL0_AESU |
2011		                     DESC_HDR_MODE0_AESU_CBC |
2012		                     DESC_HDR_SEL1_MDEUA |
2013		                     DESC_HDR_MODE1_MDEU_INIT |
2014		                     DESC_HDR_MODE1_MDEU_PAD |
2015		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2016	},
2017	{	.type = CRYPTO_ALG_TYPE_AEAD,
2018		.alg.crypto = {
2019			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
2020			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
2021			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2022			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2023			.cra_aead = {
2024				.ivsize = DES3_EDE_BLOCK_SIZE,
2025				.maxauthsize = SHA256_DIGEST_SIZE,
2026			}
2027		},
2028		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2029			             DESC_HDR_SEL0_DEU |
2030		                     DESC_HDR_MODE0_DEU_CBC |
2031		                     DESC_HDR_MODE0_DEU_3DES |
2032		                     DESC_HDR_SEL1_MDEUA |
2033		                     DESC_HDR_MODE1_MDEU_INIT |
2034		                     DESC_HDR_MODE1_MDEU_PAD |
2035		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2036	},
2037	{	.type = CRYPTO_ALG_TYPE_AEAD,
2038		.alg.crypto = {
2039			.cra_name = "authenc(hmac(sha384),cbc(aes))",
2040			.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
2041			.cra_blocksize = AES_BLOCK_SIZE,
2042			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2043			.cra_aead = {
2044				.ivsize = AES_BLOCK_SIZE,
2045				.maxauthsize = SHA384_DIGEST_SIZE,
2046			}
2047		},
2048		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2049			             DESC_HDR_SEL0_AESU |
2050		                     DESC_HDR_MODE0_AESU_CBC |
2051		                     DESC_HDR_SEL1_MDEUB |
2052		                     DESC_HDR_MODE1_MDEU_INIT |
2053		                     DESC_HDR_MODE1_MDEU_PAD |
2054		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2055	},
2056	{	.type = CRYPTO_ALG_TYPE_AEAD,
2057		.alg.crypto = {
2058			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
2059			.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
2060			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2061			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2062			.cra_aead = {
2063				.ivsize = DES3_EDE_BLOCK_SIZE,
2064				.maxauthsize = SHA384_DIGEST_SIZE,
2065			}
2066		},
2067		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2068			             DESC_HDR_SEL0_DEU |
2069		                     DESC_HDR_MODE0_DEU_CBC |
2070		                     DESC_HDR_MODE0_DEU_3DES |
2071		                     DESC_HDR_SEL1_MDEUB |
2072		                     DESC_HDR_MODE1_MDEU_INIT |
2073		                     DESC_HDR_MODE1_MDEU_PAD |
2074		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2075	},
2076	{	.type = CRYPTO_ALG_TYPE_AEAD,
2077		.alg.crypto = {
2078			.cra_name = "authenc(hmac(sha512),cbc(aes))",
2079			.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
2080			.cra_blocksize = AES_BLOCK_SIZE,
2081			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2082			.cra_aead = {
2083				.ivsize = AES_BLOCK_SIZE,
2084				.maxauthsize = SHA512_DIGEST_SIZE,
2085			}
2086		},
2087		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2088			             DESC_HDR_SEL0_AESU |
2089		                     DESC_HDR_MODE0_AESU_CBC |
2090		                     DESC_HDR_SEL1_MDEUB |
2091		                     DESC_HDR_MODE1_MDEU_INIT |
2092		                     DESC_HDR_MODE1_MDEU_PAD |
2093		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2094	},
2095	{	.type = CRYPTO_ALG_TYPE_AEAD,
2096		.alg.crypto = {
2097			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
2098			.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
2099			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2100			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2101			.cra_aead = {
2102				.ivsize = DES3_EDE_BLOCK_SIZE,
2103				.maxauthsize = SHA512_DIGEST_SIZE,
2104			}
2105		},
2106		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2107			             DESC_HDR_SEL0_DEU |
2108		                     DESC_HDR_MODE0_DEU_CBC |
2109		                     DESC_HDR_MODE0_DEU_3DES |
2110		                     DESC_HDR_SEL1_MDEUB |
2111		                     DESC_HDR_MODE1_MDEU_INIT |
2112		                     DESC_HDR_MODE1_MDEU_PAD |
2113		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2114	},
2115	{	.type = CRYPTO_ALG_TYPE_AEAD,
2116		.alg.crypto = {
2117			.cra_name = "authenc(hmac(md5),cbc(aes))",
2118			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2119			.cra_blocksize = AES_BLOCK_SIZE,
2120			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2121			.cra_aead = {
2122				.ivsize = AES_BLOCK_SIZE,
2123				.maxauthsize = MD5_DIGEST_SIZE,
2124			}
2125		},
2126		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2127			             DESC_HDR_SEL0_AESU |
2128		                     DESC_HDR_MODE0_AESU_CBC |
2129		                     DESC_HDR_SEL1_MDEUA |
2130		                     DESC_HDR_MODE1_MDEU_INIT |
2131		                     DESC_HDR_MODE1_MDEU_PAD |
2132		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2133	},
2134	{	.type = CRYPTO_ALG_TYPE_AEAD,
2135		.alg.crypto = {
2136			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2137			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2138			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2139			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2140			.cra_aead = {
2141				.ivsize = DES3_EDE_BLOCK_SIZE,
2142				.maxauthsize = MD5_DIGEST_SIZE,
2143			}
2144		},
2145		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2146			             DESC_HDR_SEL0_DEU |
2147		                     DESC_HDR_MODE0_DEU_CBC |
2148		                     DESC_HDR_MODE0_DEU_3DES |
2149		                     DESC_HDR_SEL1_MDEUA |
2150		                     DESC_HDR_MODE1_MDEU_INIT |
2151		                     DESC_HDR_MODE1_MDEU_PAD |
2152		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2153	},
2154	/* ABLKCIPHER algorithms. */
2155	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2156		.alg.crypto = {
2157			.cra_name = "cbc(aes)",
2158			.cra_driver_name = "cbc-aes-talitos",
2159			.cra_blocksize = AES_BLOCK_SIZE,
2160			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2161				     CRYPTO_ALG_ASYNC,
2162			.cra_ablkcipher = {
2163				.min_keysize = AES_MIN_KEY_SIZE,
2164				.max_keysize = AES_MAX_KEY_SIZE,
2165				.ivsize = AES_BLOCK_SIZE,
2166			}
2167		},
2168		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2169				     DESC_HDR_SEL0_AESU |
2170				     DESC_HDR_MODE0_AESU_CBC,
2171	},
2172	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2173		.alg.crypto = {
2174			.cra_name = "cbc(des3_ede)",
2175			.cra_driver_name = "cbc-3des-talitos",
2176			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2177			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2178				     CRYPTO_ALG_ASYNC,
2179			.cra_ablkcipher = {
2180				.min_keysize = DES3_EDE_KEY_SIZE,
2181				.max_keysize = DES3_EDE_KEY_SIZE,
2182				.ivsize = DES3_EDE_BLOCK_SIZE,
2183			}
2184		},
2185		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2186			             DESC_HDR_SEL0_DEU |
2187		                     DESC_HDR_MODE0_DEU_CBC |
2188		                     DESC_HDR_MODE0_DEU_3DES,
2189	},
2190	/* AHASH algorithms. */
2191	{	.type = CRYPTO_ALG_TYPE_AHASH,
2192		.alg.hash = {
2193			.halg.digestsize = MD5_DIGEST_SIZE,
2194			.halg.base = {
2195				.cra_name = "md5",
2196				.cra_driver_name = "md5-talitos",
2197				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2198				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2199					     CRYPTO_ALG_ASYNC,
2200			}
2201		},
2202		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2203				     DESC_HDR_SEL0_MDEUA |
2204				     DESC_HDR_MODE0_MDEU_MD5,
2205	},
2206	{	.type = CRYPTO_ALG_TYPE_AHASH,
2207		.alg.hash = {
2208			.halg.digestsize = SHA1_DIGEST_SIZE,
2209			.halg.base = {
2210				.cra_name = "sha1",
2211				.cra_driver_name = "sha1-talitos",
2212				.cra_blocksize = SHA1_BLOCK_SIZE,
2213				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2214					     CRYPTO_ALG_ASYNC,
2215			}
2216		},
2217		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2218				     DESC_HDR_SEL0_MDEUA |
2219				     DESC_HDR_MODE0_MDEU_SHA1,
2220	},
2221	{	.type = CRYPTO_ALG_TYPE_AHASH,
2222		.alg.hash = {
2223			.halg.digestsize = SHA224_DIGEST_SIZE,
2224			.halg.base = {
2225				.cra_name = "sha224",
2226				.cra_driver_name = "sha224-talitos",
2227				.cra_blocksize = SHA224_BLOCK_SIZE,
2228				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2229					     CRYPTO_ALG_ASYNC,
2230			}
2231		},
2232		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2233				     DESC_HDR_SEL0_MDEUA |
2234				     DESC_HDR_MODE0_MDEU_SHA224,
2235	},
2236	{	.type = CRYPTO_ALG_TYPE_AHASH,
2237		.alg.hash = {
2238			.halg.digestsize = SHA256_DIGEST_SIZE,
2239			.halg.base = {
2240				.cra_name = "sha256",
2241				.cra_driver_name = "sha256-talitos",
2242				.cra_blocksize = SHA256_BLOCK_SIZE,
2243				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2244					     CRYPTO_ALG_ASYNC,
2245			}
2246		},
2247		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2248				     DESC_HDR_SEL0_MDEUA |
2249				     DESC_HDR_MODE0_MDEU_SHA256,
2250	},
2251	{	.type = CRYPTO_ALG_TYPE_AHASH,
2252		.alg.hash = {
2253			.halg.digestsize = SHA384_DIGEST_SIZE,
2254			.halg.base = {
2255				.cra_name = "sha384",
2256				.cra_driver_name = "sha384-talitos",
2257				.cra_blocksize = SHA384_BLOCK_SIZE,
2258				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2259					     CRYPTO_ALG_ASYNC,
2260			}
2261		},
2262		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2263				     DESC_HDR_SEL0_MDEUB |
2264				     DESC_HDR_MODE0_MDEUB_SHA384,
2265	},
2266	{	.type = CRYPTO_ALG_TYPE_AHASH,
2267		.alg.hash = {
2268			.halg.digestsize = SHA512_DIGEST_SIZE,
2269			.halg.base = {
2270				.cra_name = "sha512",
2271				.cra_driver_name = "sha512-talitos",
2272				.cra_blocksize = SHA512_BLOCK_SIZE,
2273				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2274					     CRYPTO_ALG_ASYNC,
2275			}
2276		},
2277		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2278				     DESC_HDR_SEL0_MDEUB |
2279				     DESC_HDR_MODE0_MDEUB_SHA512,
2280	},
2281	{	.type = CRYPTO_ALG_TYPE_AHASH,
2282		.alg.hash = {
2283			.halg.digestsize = MD5_DIGEST_SIZE,
2284			.halg.base = {
2285				.cra_name = "hmac(md5)",
2286				.cra_driver_name = "hmac-md5-talitos",
2287				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2288				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2289					     CRYPTO_ALG_ASYNC,
2290			}
2291		},
2292		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2293				     DESC_HDR_SEL0_MDEUA |
2294				     DESC_HDR_MODE0_MDEU_MD5,
2295	},
2296	{	.type = CRYPTO_ALG_TYPE_AHASH,
2297		.alg.hash = {
2298			.halg.digestsize = SHA1_DIGEST_SIZE,
2299			.halg.base = {
2300				.cra_name = "hmac(sha1)",
2301				.cra_driver_name = "hmac-sha1-talitos",
2302				.cra_blocksize = SHA1_BLOCK_SIZE,
2303				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2304					     CRYPTO_ALG_ASYNC,
2305			}
2306		},
2307		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2308				     DESC_HDR_SEL0_MDEUA |
2309				     DESC_HDR_MODE0_MDEU_SHA1,
2310	},
2311	{	.type = CRYPTO_ALG_TYPE_AHASH,
2312		.alg.hash = {
2313			.halg.digestsize = SHA224_DIGEST_SIZE,
2314			.halg.base = {
2315				.cra_name = "hmac(sha224)",
2316				.cra_driver_name = "hmac-sha224-talitos",
2317				.cra_blocksize = SHA224_BLOCK_SIZE,
2318				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2319					     CRYPTO_ALG_ASYNC,
2320			}
2321		},
2322		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2323				     DESC_HDR_SEL0_MDEUA |
2324				     DESC_HDR_MODE0_MDEU_SHA224,
2325	},
2326	{	.type = CRYPTO_ALG_TYPE_AHASH,
2327		.alg.hash = {
2328			.halg.digestsize = SHA256_DIGEST_SIZE,
2329			.halg.base = {
2330				.cra_name = "hmac(sha256)",
2331				.cra_driver_name = "hmac-sha256-talitos",
2332				.cra_blocksize = SHA256_BLOCK_SIZE,
2333				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2334					     CRYPTO_ALG_ASYNC,
2335			}
2336		},
2337		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2338				     DESC_HDR_SEL0_MDEUA |
2339				     DESC_HDR_MODE0_MDEU_SHA256,
2340	},
2341	{	.type = CRYPTO_ALG_TYPE_AHASH,
2342		.alg.hash = {
2343			.halg.digestsize = SHA384_DIGEST_SIZE,
2344			.halg.base = {
2345				.cra_name = "hmac(sha384)",
2346				.cra_driver_name = "hmac-sha384-talitos",
2347				.cra_blocksize = SHA384_BLOCK_SIZE,
2348				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2349					     CRYPTO_ALG_ASYNC,
2350			}
2351		},
2352		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2353				     DESC_HDR_SEL0_MDEUB |
2354				     DESC_HDR_MODE0_MDEUB_SHA384,
2355	},
2356	{	.type = CRYPTO_ALG_TYPE_AHASH,
2357		.alg.hash = {
2358			.halg.digestsize = SHA512_DIGEST_SIZE,
2359			.halg.base = {
2360				.cra_name = "hmac(sha512)",
2361				.cra_driver_name = "hmac-sha512-talitos",
2362				.cra_blocksize = SHA512_BLOCK_SIZE,
2363				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2364					     CRYPTO_ALG_ASYNC,
2365			}
2366		},
2367		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2368				     DESC_HDR_SEL0_MDEUB |
2369				     DESC_HDR_MODE0_MDEUB_SHA512,
2370	}
2371};
2372
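/* a driver_algs[] template instantiated for a particular SEC device */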
2373struct talitos_crypto_alg {
2374	struct list_head entry;
2375	struct device *dev;
2376	struct talitos_alg_template algt;
2377};
2378
2379static int talitos_cra_init(struct crypto_tfm *tfm)
2380{
2381	struct crypto_alg *alg = tfm->__crt_alg;
2382	struct talitos_crypto_alg *talitos_alg;
2383	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2384	struct talitos_private *priv;
2385
2386	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2387		talitos_alg = container_of(__crypto_ahash_alg(alg),
2388					   struct talitos_crypto_alg,
2389					   algt.alg.hash);
2390	else
2391		talitos_alg = container_of(alg, struct talitos_crypto_alg,
2392					   algt.alg.crypto);
2393
2394	/* update context with ptr to dev */
2395	ctx->dev = talitos_alg->dev;
2396
2397	/* assign SEC channel to tfm in round-robin fashion */
2398	priv = dev_get_drvdata(ctx->dev);
2399	ctx->ch = atomic_inc_return(&priv->last_chan) &
2400		  (priv->num_channels - 1);
2401
2402	/* copy descriptor header template value */
2403	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2404
2405	/* select done notification */
2406	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2407
2408	return 0;
2409}
2410
2411static int talitos_cra_init_aead(struct crypto_tfm *tfm)
2412{
2413	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2414
2415	talitos_cra_init(tfm);
2416
2417	/* random first IV */
2418	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
2419
2420	return 0;
2421}
2422
2423static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2424{
2425	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2426
2427	talitos_cra_init(tfm);
2428
2429	ctx->keylen = 0;
2430	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2431				 sizeof(struct talitos_ahash_req_ctx));
2432
2433	return 0;
2434}
2435
2436/*
2437 * given the alg's descriptor header template, determine whether descriptor
2438 * type and primary/secondary execution units required match the hw
2439 * capabilities description provided in the device tree node.
2440 */
2441static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2442{
2443	struct talitos_private *priv = dev_get_drvdata(dev);
2444	int ret;
2445
2446	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2447	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2448
2449	if (SECONDARY_EU(desc_hdr_template))
2450		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2451		              & priv->exec_units);
2452
2453	return ret;
2454}
2455
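/*
 * undo talitos_probe: unregister algorithms and the RNG, then release
 * the channel fifos, irqs, tasklets and the register mapping
 */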
2456static int talitos_remove(struct platform_device *ofdev)
2457{
2458	struct device *dev = &ofdev->dev;
2459	struct talitos_private *priv = dev_get_drvdata(dev);
2460	struct talitos_crypto_alg *t_alg, *n;
2461	int i;
2462
2463	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2464		switch (t_alg->algt.type) {
2465		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2466		case CRYPTO_ALG_TYPE_AEAD:
2467			crypto_unregister_alg(&t_alg->algt.alg.crypto);
2468			break;
2469		case CRYPTO_ALG_TYPE_AHASH:
2470			crypto_unregister_ahash(&t_alg->algt.alg.hash);
2471			break;
2472		}
2473		list_del(&t_alg->entry);
2474		kfree(t_alg);
2475	}
2476
2477	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2478		talitos_unregister_rng(dev);
2479
2480	for (i = 0; i < priv->num_channels; i++)
2481		kfree(priv->chan[i].fifo);
2482
2483	kfree(priv->chan);
2484
2485	for (i = 0; i < 2; i++)
2486		if (priv->irq[i]) {
2487			free_irq(priv->irq[i], dev);
2488			irq_dispose_mapping(priv->irq[i]);
2489		}
2490
2491	tasklet_kill(&priv->done_task[0]);
2492	if (priv->irq[1])
2493		tasklet_kill(&priv->done_task[1]);
2494
2495	iounmap(priv->reg);
2496
2497	kfree(priv);
2498
2499	return 0;
2500}
2501
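/*
 * build a registerable algorithm from a driver_algs[] template: fill in
 * the type-specific entry points, and adjust or reject the template when
 * the hardware lacks HMAC support or sha224 hardware init
 */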
2502static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2503						    struct talitos_alg_template
2504						           *template)
2505{
2506	struct talitos_private *priv = dev_get_drvdata(dev);
2507	struct talitos_crypto_alg *t_alg;
2508	struct crypto_alg *alg;
2509
2510	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2511	if (!t_alg)
2512		return ERR_PTR(-ENOMEM);
2513
2514	t_alg->algt = *template;
2515
2516	switch (t_alg->algt.type) {
2517	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2518		alg = &t_alg->algt.alg.crypto;
2519		alg->cra_init = talitos_cra_init;
2520		alg->cra_type = &crypto_ablkcipher_type;
2521		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2522		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2523		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2524		alg->cra_ablkcipher.geniv = "eseqiv";
2525		break;
2526	case CRYPTO_ALG_TYPE_AEAD:
2527		alg = &t_alg->algt.alg.crypto;
2528		alg->cra_init = talitos_cra_init_aead;
2529		alg->cra_type = &crypto_aead_type;
2530		alg->cra_aead.setkey = aead_setkey;
2531		alg->cra_aead.setauthsize = aead_setauthsize;
2532		alg->cra_aead.encrypt = aead_encrypt;
2533		alg->cra_aead.decrypt = aead_decrypt;
2534		alg->cra_aead.givencrypt = aead_givencrypt;
2535		alg->cra_aead.geniv = "<built-in>";
2536		break;
2537	case CRYPTO_ALG_TYPE_AHASH:
2538		alg = &t_alg->algt.alg.hash.halg.base;
2539		alg->cra_init = talitos_cra_init_ahash;
2540		alg->cra_type = &crypto_ahash_type;
2541		t_alg->algt.alg.hash.init = ahash_init;
2542		t_alg->algt.alg.hash.update = ahash_update;
2543		t_alg->algt.alg.hash.final = ahash_final;
2544		t_alg->algt.alg.hash.finup = ahash_finup;
2545		t_alg->algt.alg.hash.digest = ahash_digest;
2546		t_alg->algt.alg.hash.setkey = ahash_setkey;
2547
2548		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
2549		    !strncmp(alg->cra_name, "hmac", 4)) {
2550			kfree(t_alg);
2551			return ERR_PTR(-ENOTSUPP);
2552		}
2553		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2554		    (!strcmp(alg->cra_name, "sha224") ||
2555		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
2556			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2557			t_alg->algt.desc_hdr_template =
2558					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2559					DESC_HDR_SEL0_MDEUA |
2560					DESC_HDR_MODE0_MDEU_SHA256;
2561		}
2562		break;
2563	default:
2564		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2565		kfree(t_alg);
2566		return ERR_PTR(-EINVAL);
2567	}
2568
2569	alg->cra_module = THIS_MODULE;
2570	alg->cra_priority = TALITOS_CRA_PRIORITY;
2571	alg->cra_alignmask = 0;
2572	alg->cra_ctxsize = sizeof(struct talitos_ctx);
2573	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2574
2575	t_alg->dev = dev;
2576
2577	return t_alg;
2578}
2579
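/*
 * map and request the irq line(s) described in the device tree node.
 * with a single line, one handler serves all four channels; with two
 * lines, channels 0/2 and 1/3 are split between them.
 */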
2580static int talitos_probe_irq(struct platform_device *ofdev)
2581{
2582	struct device *dev = &ofdev->dev;
2583	struct device_node *np = ofdev->dev.of_node;
2584	struct talitos_private *priv = dev_get_drvdata(dev);
2585	int err;
2586
2587	priv->irq[0] = irq_of_parse_and_map(np, 0);
2588	if (!priv->irq[0]) {
2589		dev_err(dev, "failed to map irq\n");
2590		return -EINVAL;
2591	}
2592
2593	priv->irq[1] = irq_of_parse_and_map(np, 1);
2594
2595	/* with a single irq line, one handler serves all four channels */
2596	if (!priv->irq[1]) {
2597		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
2598				  dev_driver_string(dev), dev);
2599		goto primary_out;
2600	}
2601
2602	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
2603			  dev_driver_string(dev), dev);
2604	if (err)
2605		goto primary_out;
2606
2607	/* get the secondary irq line */
2608	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
2609			  dev_driver_string(dev), dev);
2610	if (err) {
2611		dev_err(dev, "failed to request secondary irq\n");
2612		irq_dispose_mapping(priv->irq[1]);
2613		priv->irq[1] = 0;
2614	}
2615
2616	return err;
2617
2618primary_out:
2619	if (err) {
2620		dev_err(dev, "failed to request primary irq\n");
2621		irq_dispose_mapping(priv->irq[0]);
2622		priv->irq[0] = 0;
2623	}
2624
2625	return err;
2626}
2627
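/*
 * probe: map registers, read SEC capabilities from the device tree,
 * set up channels, fifos and interrupts, then register the RNG and
 * every algorithm the hardware supports
 */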
2628static int talitos_probe(struct platform_device *ofdev)
2629{
2630	struct device *dev = &ofdev->dev;
2631	struct device_node *np = ofdev->dev.of_node;
2632	struct talitos_private *priv;
2633	const unsigned int *prop;
2634	int i, err;
2635
2636	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2637	if (!priv)
2638		return -ENOMEM;
2639
2640	INIT_LIST_HEAD(&priv->alg_list);
2641
2642	dev_set_drvdata(dev, priv);
2643
2644	priv->ofdev = ofdev;
2645
2646	spin_lock_init(&priv->reg_lock);
2647
2648	err = talitos_probe_irq(ofdev);
2649	if (err)
2650		goto err_out;
2651
2652	if (!priv->irq[1]) {
2653		tasklet_init(&priv->done_task[0], talitos_done_4ch,
2654			     (unsigned long)dev);
2655	} else {
2656		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
2657			     (unsigned long)dev);
2658		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
2659			     (unsigned long)dev);
2660	}
2661
2662	priv->reg = of_iomap(np, 0);
2663	if (!priv->reg) {
2664		dev_err(dev, "failed to of_iomap\n");
2665		err = -ENOMEM;
2666		goto err_out;
2667	}
2668
2669	/* get SEC version capabilities from device tree */
2670	prop = of_get_property(np, "fsl,num-channels", NULL);
2671	if (prop)
2672		priv->num_channels = *prop;
2673
2674	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2675	if (prop)
2676		priv->chfifo_len = *prop;
2677
2678	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2679	if (prop)
2680		priv->exec_units = *prop;
2681
2682	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2683	if (prop)
2684		priv->desc_types = *prop;
2685
2686	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2687	    !priv->exec_units || !priv->desc_types) {
2688		dev_err(dev, "invalid property data in device tree node\n");
2689		err = -EINVAL;
2690		goto err_out;
2691	}
2692
2693	if (of_device_is_compatible(np, "fsl,sec3.0"))
2694		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2695
2696	if (of_device_is_compatible(np, "fsl,sec2.1"))
2697		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2698				  TALITOS_FTR_SHA224_HWINIT |
2699				  TALITOS_FTR_HMAC_OK;
2700
2701	priv->chan = kzalloc(sizeof(struct talitos_channel) *
2702			     priv->num_channels, GFP_KERNEL);
2703	if (!priv->chan) {
2704		dev_err(dev, "failed to allocate channel management space\n");
2705		err = -ENOMEM;
2706		goto err_out;
2707	}
2708
2709	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
2710
2711	for (i = 0; i < priv->num_channels; i++) {
2712		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
2713		if (!priv->irq[1] || !(i & 1))
2714			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
2715
2716		spin_lock_init(&priv->chan[i].head_lock);
2717		spin_lock_init(&priv->chan[i].tail_lock);
2718
2719		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
2720					     priv->fifo_len, GFP_KERNEL);
2721		if (!priv->chan[i].fifo) {
2722			dev_err(dev, "failed to allocate request fifo %d\n", i);
2723			err = -ENOMEM;
2724			goto err_out;
2725		}
2726
2727		atomic_set(&priv->chan[i].submit_count,
2728			   -(priv->chfifo_len - 1));
2729	}
2730
2731	dma_set_mask(dev, DMA_BIT_MASK(36));
2732
2733	/* reset and initialize the h/w */
2734	err = init_device(dev);
2735	if (err) {
2736		dev_err(dev, "failed to initialize device\n");
2737		goto err_out;
2738	}
2739
2740	/* register the RNG, if available */
2741	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
2742		err = talitos_register_rng(dev);
2743		if (err) {
2744			dev_err(dev, "failed to register hwrng: %d\n", err);
2745			goto err_out;
2746		} else
2747			dev_info(dev, "hwrng\n");
2748	}
2749
2750	/* register crypto algorithms the device supports */
2751	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2752		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
2753			struct talitos_crypto_alg *t_alg;
2754			char *name = NULL;
2755
2756			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
2757			if (IS_ERR(t_alg)) {
2758				err = PTR_ERR(t_alg);
2759				if (err == -ENOTSUPP)
2760					continue;
2761				goto err_out;
2762			}
2763
2764			switch (t_alg->algt.type) {
2765			case CRYPTO_ALG_TYPE_ABLKCIPHER:
2766			case CRYPTO_ALG_TYPE_AEAD:
2767				err = crypto_register_alg(
2768						&t_alg->algt.alg.crypto);
2769				name = t_alg->algt.alg.crypto.cra_driver_name;
2770				break;
2771			case CRYPTO_ALG_TYPE_AHASH:
2772				err = crypto_register_ahash(
2773						&t_alg->algt.alg.hash);
2774				name =
2775				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
2776				break;
2777			}
2778			if (err) {
2779				dev_err(dev, "%s alg registration failed\n",
2780					name);
2781				kfree(t_alg);
2782			} else
2783				list_add_tail(&t_alg->entry, &priv->alg_list);
2784		}
2785	}
2786	if (!list_empty(&priv->alg_list))
2787		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
2788			 (char *)of_get_property(np, "compatible", NULL));
2789
2790	return 0;
2791
2792err_out:
2793	talitos_remove(ofdev);
2794
2795	return err;
2796}
2797
2798static const struct of_device_id talitos_match[] = {
2799	{
2800		.compatible = "fsl,sec2.0",
2801	},
2802	{},
2803};
2804MODULE_DEVICE_TABLE(of, talitos_match);
2805
2806static struct platform_driver talitos_driver = {
2807	.driver = {
2808		.name = "talitos",
2809		.of_match_table = talitos_match,
2810	},
2811	.probe = talitos_probe,
2812	.remove = talitos_remove,
2813};
2814
2815module_platform_driver(talitos_driver);
2816
2817MODULE_LICENSE("GPL");
2818MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
2819MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
2820