/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}
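
/*
 * Note: SEC2+ descriptor pointers are 36 bits wide; the low 32 bits go in
 * ptr->ptr and the upper bits in ptr->eptr (matching the "36-bit
 * addressing" the channels are configured for in reset_channel() below).
 * SEC1 pointers are plain 32-bit values with no eptr field.
 */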

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (!is_sec1)
		dst_ptr->eptr = src_ptr->eptr;
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
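
/*
 * Example (illustrative sketch only, not part of the driver): a caller
 * typically fills in a dma-mapped descriptor, submits it, and finishes
 * the request from the callback.  The my_*() names are hypothetical.
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		complete(context);	// wake up the submitter
 *	}
 *
 *	// desc->ptr[] must already hold dma-mapped (bus) addresses
 *	err = talitos_submit(dev, ch, desc, my_done, &my_completion);
 *	if (err != -EINPROGRESS)
 *		return err;		// -EAGAIN means the channel fifo is full
 *	wait_for_completion(&my_completion);
 */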

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}
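
/*
 * In the SEC1 ISR layout tested above, the per-channel done bits are
 * bit 28 for channel 0, bit 30 for channel 1, bit 16 for channel 2 and
 * bit 18 for channel 3 (masks 0x10000000, 0x40000000, 0x00010000 and
 * 0x00040000 respectively).
 */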

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {   \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name = dev_driver_string(dev);
	priv->rng.init = talitos_rng_init;
	priv->rng.data_present = talitos_rng_data_present;
	priv->rng.data_read = talitos_rng_data_read;
	priv->rng.priv = (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY	3000
#define TALITOS_MAX_KEY_SIZE	96
#define TALITOS_MAX_IV_LENGTH	16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
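
/*
 * After a successful aead_setkey(), ctx->key holds the two keys back to
 * back:
 *
 *	| authkey (authkeylen bytes) | enckey (enckeylen bytes) |
 *
 * which is the layout that descriptor pointers 0 (hmac key) and 3
 * (cipher key) index into in ipsec_esp() below.
 */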

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
		}
	} else
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		link_tbl_ptr[count].len = cpu_to_be16(len);
		link_tbl_ptr[count].j_extent = 0;
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;

	return count;
}
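
/*
 * For example (illustrative values only), a 3-segment source list mapped
 * with offset 16 might be converted into:
 *
 *	link_tbl[0]: addr = sg0 + 16, len = sg0->length - 16
 *	link_tbl[1]: addr = sg1,      len = sg1->length
 *	link_tbl[2]: addr = sg2,      len = remaining cryptlen,
 *	             j_extent = DESC_PTR_LNKTBL_RETURN
 *
 * where the RETURN tag on the last entry tells the SEC to stop walking
 * the table.
 */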

static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
						       : DMA_TO_DEVICE);
	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
					 areq->assoclen,
					 &edesc->link_tbl[tbl_off])) > 1) {
		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		tbl_off += ret;
	} else {
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_link_tbl_len = cryptlen;
	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
		sg_link_tbl_len += authsize;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
			       areq->assoclen, 0);
	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
						areq->assoclen, sg_link_tbl_len,
						&edesc->link_tbl[tbl_off])) >
		   1) {
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
					      tbl_off *
					      sizeof(struct talitos_ptr), 0);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);
		tbl_off += ret;
	} else {
		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);

	edesc->icv_ool = false;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
			       areq->assoclen, 0);
	} else if ((sg_count =
		    sg_to_link_tbl_offset(areq->dst, sg_count,
					  areq->assoclen, cryptlen,
					  &edesc->link_tbl[tbl_off])) > 1) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (edesc->src_nents + edesc->dst_nents +
				2) * sizeof(struct talitos_ptr) +
			       authsize, 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		edesc->icv_ool = true;
	} else {
		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		src_nents = sg_nents_for_len(src,
					     assoclen + cryptlen + authsize);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src */
		src_nents = sg_nents_for_len(src, assoclen + cryptlen +
						  (encrypt ? 0 : authsize));
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
						  (encrypt ? authsize : 0));
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? cryptlen : 0) +
				  (dst_nents ? cryptlen : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
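
/*
 * Worked example of the SEC2+ sizing above (illustrative numbers): with
 * src_nents = 3, dst_nents = 2 and authsize = 12,
 * dma_len = (3 + 2 + 2) * sizeof(struct talitos_ptr) + 2 * 12, i.e.
 * seven link-table entries followed by room for the stashed and the
 * generated ICV.
 */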

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		if (!edesc->src_nents) {
			dma_unmap_sg(dev, src, 1,
				     dst != src ? DMA_TO_DEVICE
						: DMA_BIDIRECTIONAL);
		}
		if (dst && edesc->dst_nents) {
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
			sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
					    edesc->buf + len, len);
		} else if (dst && dst != src) {
			dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
		}
	} else {
		talitos_sg_unmap(dev, edesc, src, dst);
	}
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		sg_count = edesc->src_nents ? : 1;

		if (sg_count == 1) {
			dma_map_sg(dev, src, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_copy_to_buffer(src, sg_count, edesc->buf, len);
			to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   len, DMA_TO_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_count = sg_to_link_tbl(src, sg_count, len,
						  &edesc->link_tbl[0]);
			if (sg_count > 1) {
				to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
				ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
				dma_sync_single_for_device(dev,
							   edesc->dma_link_tbl,
							   edesc->dma_len,
							   DMA_BIDIRECTIONAL);
			} else {
				/* Only one segment now, so no link tbl needed */
				to_talitos_ptr(ptr, sg_dma_address(src),
					       is_sec1);
			}
		}
	}
	return sg_count;
}

void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (dir != DMA_NONE)
		sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		if (sg_count == 1) {
			if (dir != DMA_NONE)
				dma_map_sg(dev, dst, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			struct talitos_ptr *link_tbl_ptr =
				&edesc->link_tbl[edesc->src_nents + 1];

			to_talitos_ptr(ptr, edesc->dma_link_tbl +
				       (edesc->src_nents + 1) *
				       sizeof(struct talitos_ptr), 0);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		}
	}
}
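
/*
 * Note that on SEC1 the second half of edesc->buf (dma_link_tbl + len)
 * serves as the output bounce buffer: map_sg_out_talitos_ptr() points
 * the descriptor at it, and unmap_sg_talitos_ptr() later copies the
 * result back into the destination scatterlist.
 */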

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					 &desc->ptr[3]);

	/* cipher out */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

/*
 * SEC1 doesn't like hashing a 0-sized message, so we do the padding
 * ourselves and submit a padded block
 */
void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
			       struct talitos_edesc *edesc,
			       struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourselves\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
1719
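/*
 * Descriptor pointer usage in common_nonsnoop_hash() below:
 *   ptr[0] unused, ptr[1] hash context in, ptr[2] HMAC key,
 *   ptr[3] data in, ptr[4] unused, ptr[5] digest or context out,
 *   ptr[6] unused.
 */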
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}

static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}

/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}

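/*
 * Hash the data accumulated so far, block by block.  Anything short of
 * a full block is buffered in the request context: unless this is the
 * last piece, at least one block's worth of data is always held back,
 * so that a trailing final()/finup() has something to pad and hash.
 */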
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_nents_for_len(areq->src, nbytes),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		int nents = sg_nents_for_len(areq->src, nbytes);

		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (i.e. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}

static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

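/*
 * export/import serialize the software state of a partially hashed
 * request (hardware context, buffered partial block and flags) into
 * struct talitos_export_state, which is what halg.statesize advertises
 * in the templates below.
 */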
static int ahash_export(struct ahash_request *areq, void *out)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_export_state *export = out;

	memcpy(export->hw_context, req_ctx->hw_context,
	       req_ctx->hw_context_size);
	memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
	export->swinit = req_ctx->swinit;
	export->first = req_ctx->first;
	export->last = req_ctx->last;
	export->to_hash_later = req_ctx->to_hash_later;
	export->nbuf = req_ctx->nbuf;

	return 0;
}

static int ahash_import(struct ahash_request *areq, const void *in)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	const struct talitos_export_state *export = in;

	memset(req_ctx, 0, sizeof(*req_ctx));
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	memcpy(req_ctx->hw_context, export->hw_context,
	       req_ctx->hw_context_size);
	memcpy(req_ctx->buf, export->buf, export->nbuf);
	req_ctx->swinit = export->swinit;
	req_ctx->first = export->first;
	req_ctx->last = export->last;
	req_ctx->to_hash_later = export->to_hash_later;
	req_ctx->nbuf = export->nbuf;

	return 0;
}

struct keyhash_result {
	struct completion completion;
	int err;
};

static void keyhash_complete(struct crypto_async_request *req, int err)
{
	struct keyhash_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

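/*
 * Synchronously digest a key that is longer than the algorithm's block
 * size: issue an async digest on our own tfm and sleep on a completion
 * until the result (or an error) comes back.
 */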
static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}

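/*
 * Per the HMAC definition (RFC 2104), a key longer than the block size
 * is replaced by its digest; anything up to a block is used verbatim.
 */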
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	ctx->keylen = keysize;

	return 0;
}

struct talitos_alg_template {
	u32 type;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;
};

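/*
 * Each template below pairs the crypto API metadata for an algorithm
 * with a canned descriptor header; at probe time, hw_supports() checks
 * the header's descriptor type and execution unit selections against
 * the capabilities advertised in the device tree before registering.
 */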
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};

struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};

static int talitos_init_common(struct talitos_ctx *ctx,
			       struct talitos_crypto_alg *talitos_alg)
{
	struct talitos_private *priv;

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
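	/* (num_channels is validated as a power of two in talitos_probe(),
	 * so the AND against num_channels - 1 below is a cheap modulo)
	 */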
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg,
				   algt.alg.aead);

	return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}

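/*
 * talitos_remove() also serves as the error-unwind path for
 * talitos_probe(), so every step below must tolerate a partially
 * initialized device (NULL channel array, unmapped IRQs, etc.).
 */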
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; priv->chan && i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}

static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

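/*
 * SEC1 exposes a single interrupt line for all four channels; SEC2+
 * can expose two, in which case channels 0/2 are serviced by the
 * primary line and channels 1/3 by the secondary one.
 */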
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

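		/* Bias the counter negative so that it reaches zero after
		 * chfifo_len - 1 in-flight requests; the submit path
		 * (talitos_submit()) treats a positive count as a full fifo.
		 */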
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");