This source file includes the following definitions:
- select_channel
- spu_ablkcipher_rx_sg_create
- spu_ablkcipher_tx_sg_create
- mailbox_send_message
- handle_ablkcipher_req
- handle_ablkcipher_resp
- spu_ahash_rx_sg_create
- spu_ahash_tx_sg_create
- handle_ahash_req
- spu_hmac_outer_hash
- ahash_req_done
- handle_ahash_resp
- spu_aead_rx_sg_create
- spu_aead_tx_sg_create
- handle_aead_req
- handle_aead_resp
- spu_chunk_cleanup
- finish_req
- spu_rx_callback
- ablkcipher_enqueue
- des_setkey
- threedes_setkey
- aes_setkey
- rc4_setkey
- ablkcipher_setkey
- ablkcipher_encrypt
- ablkcipher_decrypt
- ahash_enqueue
- __ahash_init
- spu_no_incr_hash
- ahash_init
- __ahash_update
- ahash_update
- __ahash_final
- ahash_final
- __ahash_finup
- ahash_finup
- ahash_digest
- ahash_setkey
- ahash_export
- ahash_import
- ahash_hmac_setkey
- ahash_hmac_init
- ahash_hmac_update
- ahash_hmac_final
- ahash_hmac_finup
- ahash_hmac_digest
- aead_need_fallback
- aead_complete
- aead_do_fallback
- aead_enqueue
- aead_authenc_setkey
- aead_gcm_ccm_setkey
- aead_gcm_esp_setkey
- rfc4543_gcm_esp_setkey
- aead_ccm_esp_setkey
- aead_setauthsize
- aead_encrypt
- aead_decrypt
- generic_cra_init
- ablkcipher_cra_init
- ahash_cra_init
- aead_cra_init
- generic_cra_exit
- aead_cra_exit
- spu_functions_register
- spu_mb_init
- spu_mb_release
- spu_counters_init
- spu_register_ablkcipher
- spu_register_ahash
- spu_register_aead
- spu_algs_register
- spu_dt_read
- bcm_spu_probe
- bcm_spu_remove
1 /*
2  * Broadcom SPU crypto driver: hardware offload of symmetric cipher, hash,
3  * and AEAD algorithms for Broadcom iProc SoCs (SPU-M and SPU2).
4  */
5
6 #include <linux/err.h>
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/errno.h>
10 #include <linux/kernel.h>
11 #include <linux/interrupt.h>
12 #include <linux/platform_device.h>
13 #include <linux/scatterlist.h>
14 #include <linux/crypto.h>
15 #include <linux/kthread.h>
16 #include <linux/rtnetlink.h>
17 #include <linux/sched.h>
18 #include <linux/of_address.h>
19 #include <linux/of_device.h>
20 #include <linux/io.h>
21 #include <linux/bitops.h>
22
23 #include <crypto/algapi.h>
24 #include <crypto/aead.h>
25 #include <crypto/internal/aead.h>
26 #include <crypto/aes.h>
27 #include <crypto/internal/des.h>
28 #include <crypto/hmac.h>
29 #include <crypto/sha.h>
30 #include <crypto/md5.h>
31 #include <crypto/authenc.h>
32 #include <crypto/skcipher.h>
33 #include <crypto/hash.h>
34 #include <crypto/sha3.h>
35
36 #include "util.h"
37 #include "cipher.h"
38 #include "spu.h"
39 #include "spum.h"
40 #include "spu2.h"
41
42 /* Driver-global state: SPU hardware interface, mailbox channels, counters */
43
44 struct device_private iproc_priv;
45
46
47
48 int flow_debug_logging;
49 module_param(flow_debug_logging, int, 0644);
50 MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");
51
52 int packet_debug_logging;
53 module_param(packet_debug_logging, int, 0644);
54 MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");
55
56 int debug_logging_sleep;
57 module_param(debug_logging_sleep, int, 0644);
58 MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");
59
60
61
62 /*
63  * Priority values used when registering algorithms with the crypto API.
64  * A higher value makes the crypto API prefer these hardware
65  * implementations over lower-priority (e.g. software) implementations
66  * of the same algorithm.
67  */
68
69 static int cipher_pri = 150;
70 module_param(cipher_pri, int, 0644);
71 MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");
72
73 static int hash_pri = 100;
74 module_param(hash_pri, int, 0644);
75 MODULE_PARM_DESC(hash_pri, "Priority for hash algos");
76
77 static int aead_pri = 150;
78 module_param(aead_pri, int, 0644);
79 MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
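/*
 * Example (hypothetical module name; check your build): raise the hash
 * priority above a software implementation at load time with
 *   modprobe bcm_crypto_spu hash_pri=300
 */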
80
81
82
83 /*
84  * BCM header bytes prepended to SPU request messages on platforms that
85  * use a BCM header (see BCM_HDR_LEN below).
86  */
87
88 static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
89
90 /* Some SPU hw does not use a BCM header on SPU messages, so BCM_HDR_LEN
91  * is set dynamically at probe time from the device tree.
92  */
93 #define BCM_HDR_LEN iproc_priv.bcm_hdr_len
94
95 /* Min and max time (usec) to sleep before retrying when mbox queue is full */
96 #define MBOX_SLEEP_MIN 800
97 #define MBOX_SLEEP_MAX 1000
98
99 /**
100  * select_channel() - Select a SPU channel to handle a crypto request.
101  * Selects channels in round robin order.
102  *
103  * Return: channel index
104  */
105 static u8 select_channel(void)
106 {
107 u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
108
109 return chan_idx % iproc_priv.spu.num_chan;
110 }
111
112 /**
113  * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used
114  * to receive a SPU response message for an ablkcipher request. Includes
115  * buffers to catch the SPU message headers and the response data.
116  * @mssg: mailbox message containing the receive sg
117  * @rctx: crypto request context
118  * @rx_frag_num: number of scatterlist elements required to hold the
119  *		SPU response message
120  * @chunksize: Number of bytes of response data expected
121  * @stat_pad_len: Number of bytes required to pad the STAT field to
122  *		a 4-byte boundary
123  *
124  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
125  * when the request completes, whether the request is handled successfully
126  * or there is an error.
127  *
128  * Return: 0 if successful
129  *	   < 0 if an error
130  */
131
132 static int
133 spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
134 struct iproc_reqctx_s *rctx,
135 u8 rx_frag_num,
136 unsigned int chunksize, u32 stat_pad_len)
137 {
138 struct spu_hw *spu = &iproc_priv.spu;
139 struct scatterlist *sg;
140 struct iproc_ctx_s *ctx = rctx->ctx;
141 u32 datalen;
142
143 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
144 rctx->gfp);
145 if (!mssg->spu.dst)
146 return -ENOMEM;
147
148 sg = mssg->spu.dst;
149 sg_init_table(sg, rx_frag_num);
150
151 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
152
153
154 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
155 spu->spu_xts_tweak_in_payload())
156 sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
157 SPU_XTS_TWEAK_SIZE);
158
159
160 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
161 rctx->dst_nents, chunksize);
162 if (datalen < chunksize) {
163 pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
164 __func__, chunksize, datalen);
165 return -EFAULT;
166 }
167
168 if (ctx->cipher.alg == CIPHER_ALG_RC4)
169
170 sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);
171
172 if (stat_pad_len)
173 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
174
175 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
176 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
177
178 return 0;
179 }
180
181
182 /**
183  * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used
184  * to send a SPU request message for an ablkcipher request. Includes SPU
185  * message headers and the request data.
186  * @mssg: mailbox message containing the transmit sg
187  * @rctx: crypto request context
188  * @tx_frag_num: number of scatterlist elements required to construct the
189  *		SPU request message
190  * @chunksize: Number of bytes of request data
191  * @pad_len: Number of pad bytes
192  *
193  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
194  * when the request completes, whether the request is handled successfully
195  * or there is an error.
196  *
197  * Return: 0 if successful
198  *	   < 0 if an error
199  */
200 static int
201 spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
202 struct iproc_reqctx_s *rctx,
203 u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
204 {
205 struct spu_hw *spu = &iproc_priv.spu;
206 struct scatterlist *sg;
207 struct iproc_ctx_s *ctx = rctx->ctx;
208 u32 datalen;
209 u32 stat_len;
210
211 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
212 rctx->gfp);
213 if (unlikely(!mssg->spu.src))
214 return -ENOMEM;
215
216 sg = mssg->spu.src;
217 sg_init_table(sg, tx_frag_num);
218
219 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
220 BCM_HDR_LEN + ctx->spu_req_hdr_len);
221
222
223 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
224 spu->spu_xts_tweak_in_payload())
225 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
226
227
228 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
229 rctx->src_nents, chunksize);
230 if (unlikely(datalen < chunksize)) {
231 pr_err("%s(): failed to copy src sg to mbox msg",
232 __func__);
233 return -EFAULT;
234 }
235
236 if (pad_len)
237 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
238
239 stat_len = spu->spu_tx_status_len();
240 if (stat_len) {
241 memset(rctx->msg_buf.tx_stat, 0, stat_len);
242 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
243 }
244 return 0;
245 }
246
247 static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
248 u8 chan_idx)
249 {
250 int err;
251 int retry_cnt = 0;
252 struct device *dev = &(iproc_priv.pdev->dev);
253
254 err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
255 if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
256 while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
257 /*
258  * Mailbox queue is full. Since MAY_SLEEP is set, assume
259  * not in atomic context, so we can wait and try again.
260  */
261 retry_cnt++;
262 usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
263 err = mbox_send_message(iproc_priv.mbox[chan_idx],
264 mssg);
265 atomic_inc(&iproc_priv.mb_no_spc);
266 }
267 }
268 if (err < 0) {
269 atomic_inc(&iproc_priv.mb_send_fail);
270 return err;
271 }
272
273 /* Check the error code returned by the mailbox controller */
274 err = mssg->error;
275 if (unlikely(err < 0)) {
276 dev_err(dev, "message error %d", err);
277
278 }
279
280 /* Signal txdone for mailbox channel */
281 mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
282 return err;
283 }
284
285
286 /**
287  * handle_ablkcipher_req() - Submit as much of a block cipher request as fits
288  * in a single SPU request message, starting at the current position in the
289  * request data.
290  * @rctx: Crypto request context
291  *
292  * This may be called on the crypto API thread, or, when a request is so large
293  * it must be broken into chunks, on the thread that invokes the receive
294  * callback with a mailbox response message.
295  *
296  * Large requests are broken into chunks; the next chunk is submitted from
297  * the receive callback after the previous chunk's response arrives.
298  *
299  * Return: -EINPROGRESS: request has been accepted and the result will be
300  *			 returned asynchronously
301  *	   any other value indicates an error
302  */
303 static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
304 {
305 struct spu_hw *spu = &iproc_priv.spu;
306 struct crypto_async_request *areq = rctx->parent;
307 struct ablkcipher_request *req =
308 container_of(areq, struct ablkcipher_request, base);
309 struct iproc_ctx_s *ctx = rctx->ctx;
310 struct spu_cipher_parms cipher_parms;
311 int err = 0;
312 unsigned int chunksize = 0;
313 int remaining = 0;
314 int chunk_start;
315
316
317 u8 local_iv_ctr[MAX_IV_SIZE];
318 u32 stat_pad_len;
319 u32 pad_len;
320 bool update_key = false;
321 struct brcm_message *mssg;
322
323
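/*
 * Number of scatterlist entries in the mailbox message src/dst lists:
 * rx starts at 2 for the response header and STATUS word; tx starts at
 * 1 for the request header.
 */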
324 u8 rx_frag_num = 2;
325 u8 tx_frag_num = 1;
326
327 flow_log("%s\n", __func__);
328
329 cipher_parms.alg = ctx->cipher.alg;
330 cipher_parms.mode = ctx->cipher.mode;
331 cipher_parms.type = ctx->cipher_type;
332 cipher_parms.key_len = ctx->enckeylen;
333 cipher_parms.key_buf = ctx->enckey;
334 cipher_parms.iv_buf = local_iv_ctr;
335 cipher_parms.iv_len = rctx->iv_ctr_len;
336
337 mssg = &rctx->mb_mssg;
338 chunk_start = rctx->src_sent;
339 remaining = rctx->total_todo - chunk_start;
340
341
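/* Carve off the next chunk, limited by the hw max payload size */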
342 if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
343 (remaining > ctx->max_payload))
344 chunksize = ctx->max_payload;
345 else
346 chunksize = remaining;
347
348 rctx->src_sent += chunksize;
349 rctx->total_sent = rctx->src_sent;
350
351
352 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
353 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
354
355 if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
356 rctx->is_encrypt && chunk_start)
357
358
359
360
361 sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
362 rctx->iv_ctr_len,
363 chunk_start - rctx->iv_ctr_len);
364
365 if (rctx->iv_ctr_len) {
366
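/* get our local copy of the IV */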
367 memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
368 rctx->iv_ctr_len);
369
370
371 if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
372 !rctx->is_encrypt) {
373
374
375
376
377 sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
378 rctx->iv_ctr_len,
379 rctx->src_sent - rctx->iv_ctr_len);
380 } else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
381
382
383
384
385
386
387
388
389
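/*
 * The SPU hw increments the counter once for each 16-byte AES block,
 * so advance our copy by chunksize / 16 (chunksize >> 4) to get the
 * starting counter for the next chunk. CTR is only supported for AES
 * in this driver, so a 16-byte block can be assumed.
 */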
390 add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
391 }
392 }
393
394 if (ctx->cipher.alg == CIPHER_ALG_RC4) {
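/* Reserve an extra rx fragment to catch the updated RC4 state (SUPDT) */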
395 rx_frag_num++;
396 if (chunk_start) {
397
398
399
400
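/*
 * For non-first RC4 chunks, use the SUPDT state from the previous
 * response as the key for this chunk.
 */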
401 cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
402 update_key = true;
403 cipher_parms.type = CIPHER_TYPE_UPDT;
404 } else if (!rctx->is_encrypt) {
405
406
407
408
409
410
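/*
 * First chunk of an RC4 decrypt: the key in the pre-built message
 * header may have been changed if the matching encrypt needed multiple
 * chunks, so revert to the ctx->enckey value.
 */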
411 update_key = true;
412 cipher_parms.type = CIPHER_TYPE_INIT;
413 }
414 }
415
416 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
417 flow_log("max_payload infinite\n");
418 else
419 flow_log("max_payload %u\n", ctx->max_payload);
420
421 flow_log("sent:%u start:%u remains:%u size:%u\n",
422 rctx->src_sent, chunk_start, remaining, chunksize);
423
424
425 memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
426 sizeof(rctx->msg_buf.bcm_spu_req_hdr));
427
428
429
430
431
432
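/*
 * Finish building the SPU request header. The key buffer in
 * cipher_parms is only consumed when update_key was set above for RC4;
 * it is ignored in all other cases.
 */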
433 spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
434 ctx->spu_req_hdr_len, !(rctx->is_encrypt),
435 &cipher_parms, update_key, chunksize);
436
437 atomic64_add(chunksize, &iproc_priv.bytes_out);
438
439 stat_pad_len = spu->spu_wordalign_padlen(chunksize);
440 if (stat_pad_len)
441 rx_frag_num++;
442 pad_len = stat_pad_len;
443 if (pad_len) {
444 tx_frag_num++;
445 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
446 0, ctx->auth.alg, ctx->auth.mode,
447 rctx->total_sent, stat_pad_len);
448 }
449
450 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
451 ctx->spu_req_hdr_len);
452 packet_log("payload:\n");
453 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
454 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
455
456
457
458
459
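/*
 * Build the mailbox message containing the SPU request message and rx
 * buffers to catch the response message.
 */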
460 memset(mssg, 0, sizeof(*mssg));
461 mssg->type = BRCM_MESSAGE_SPU;
462 mssg->ctx = rctx;
463
464
465 rx_frag_num += rctx->dst_nents;
466
467 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
468 spu->spu_xts_tweak_in_payload())
469 rx_frag_num++;
470
471 err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
472 stat_pad_len);
473 if (err)
474 return err;
475
476
477 tx_frag_num += rctx->src_nents;
478 if (spu->spu_tx_status_len())
479 tx_frag_num++;
480
481 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
482 spu->spu_xts_tweak_in_payload())
483 tx_frag_num++;
484
485 err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
486 pad_len);
487 if (err)
488 return err;
489
490 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
491 if (unlikely(err < 0))
492 return err;
493
494 return -EINPROGRESS;
495 }
496
497
498 /**
499  * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates
500  * the total received count for the request and updates global stats.
501  */
502 static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
503 {
504 struct spu_hw *spu = &iproc_priv.spu;
505 #ifdef DEBUG
506 struct crypto_async_request *areq = rctx->parent;
507 struct ablkcipher_request *req = ablkcipher_request_cast(areq);
508 #endif
509 struct iproc_ctx_s *ctx = rctx->ctx;
510 u32 payload_len;
511
512
513 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
514
515
516
517
518
519 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
520 spu->spu_xts_tweak_in_payload() &&
521 (payload_len >= SPU_XTS_TWEAK_SIZE))
522 payload_len -= SPU_XTS_TWEAK_SIZE;
523
524 atomic64_add(payload_len, &iproc_priv.bytes_in);
525
526 flow_log("%s() offset: %u, bd_len: %u BD:\n",
527 __func__, rctx->total_received, payload_len);
528
529 dump_sg(req->dst, rctx->total_received, payload_len);
530 if (ctx->cipher.alg == CIPHER_ALG_RC4)
531 packet_dump(" supdt ", rctx->msg_buf.c.supdt_tweak,
532 SPU_SUPDT_LEN);
533
534 rctx->total_received += payload_len;
535 if (rctx->total_received == rctx->total_todo) {
536 atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
537 atomic_inc(
538 &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
539 }
540 }
541
542
543 /**
544  * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
545  * receive a SPU response message for an ahash request.
546  * @mssg: mailbox message containing the receive sg
547  * @rctx: crypto request context
548  * @rx_frag_num: number of scatterlist elements required to hold the
549  *		SPU response message
550  * @digestsize: length of hash digest, in bytes
551  * @stat_pad_len: Number of bytes required to pad the STAT field to
552  *		a 4-byte boundary
553  *
554  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
555  * when the request completes, whether the request is handled successfully
556  * or there is an error.
557  *
558  * Return: 0 if successful
559  *	   < 0 if an error
560  */
561 static int
562 spu_ahash_rx_sg_create(struct brcm_message *mssg,
563 struct iproc_reqctx_s *rctx,
564 u8 rx_frag_num, unsigned int digestsize,
565 u32 stat_pad_len)
566 {
567 struct spu_hw *spu = &iproc_priv.spu;
568 struct scatterlist *sg;
569 struct iproc_ctx_s *ctx = rctx->ctx;
570
571 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
572 rctx->gfp);
573 if (!mssg->spu.dst)
574 return -ENOMEM;
575
576 sg = mssg->spu.dst;
577 sg_init_table(sg, rx_frag_num);
578
579 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
580
581
582 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
583
584 if (stat_pad_len)
585 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
586
587 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
588 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
589 return 0;
590 }
591
592
593 /**
594  * spu_ahash_tx_sg_create() - Build up the scatterlist of buffers used to
595  * send a SPU request message for an ahash request. Includes SPU message
596  * headers and the request data.
597  * @mssg: mailbox message containing the transmit sg
598  * @rctx: crypto request context
599  * @tx_frag_num: number of scatterlist elements required to construct the
600  *		SPU request message
601  * @spu_hdr_len: length in bytes of the SPU message header
602  * @hash_carry_len: number of bytes of data carried over from previous req
603  * @new_data_len: number of bytes of new request data
604  * @pad_len: number of pad bytes
605  *
606  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
607  * when the request completes, whether the request is handled successfully
608  * or there is an error.
609  *
610  * Return: 0 if successful
611  *	   < 0 if an error
612  */
613 static int
614 spu_ahash_tx_sg_create(struct brcm_message *mssg,
615 struct iproc_reqctx_s *rctx,
616 u8 tx_frag_num,
617 u32 spu_hdr_len,
618 unsigned int hash_carry_len,
619 unsigned int new_data_len, u32 pad_len)
620 {
621 struct spu_hw *spu = &iproc_priv.spu;
622 struct scatterlist *sg;
623 u32 datalen;
624 u32 stat_len;
625
626 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
627 rctx->gfp);
628 if (!mssg->spu.src)
629 return -ENOMEM;
630
631 sg = mssg->spu.src;
632 sg_init_table(sg, tx_frag_num);
633
634 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
635 BCM_HDR_LEN + spu_hdr_len);
636
637 if (hash_carry_len)
638 sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
639
640 if (new_data_len) {
641
642 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
643 rctx->src_nents, new_data_len);
644 if (datalen < new_data_len) {
645 pr_err("%s(): failed to copy src sg to mbox msg",
646 __func__);
647 return -EFAULT;
648 }
649 }
650
651 if (pad_len)
652 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
653
654 stat_len = spu->spu_tx_status_len();
655 if (stat_len) {
656 memset(rctx->msg_buf.tx_stat, 0, stat_len);
657 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
658 }
659
660 return 0;
661 }
662
663
664 /**
665  * handle_ahash_req() - Process an asynchronous hash request from the
666  * crypto API.
667  * @rctx: Crypto request context
668  *
669  * Builds a SPU request message embedded in a mailbox message and submits
670  * the mailbox message on a SPU mailbox channel. The SPU request message is
671  * constructed as a scatterlist, including entries from the crypto API's
672  * src scatterlist to avoid copying the data to be hashed. This function is
673  * called either on the thread from the crypto API, or, in the case that
674  * the crypto API request is too large to fit in a single SPU request
675  * message, on the thread that invokes the receive callback with a response
676  * message. Because some operations require the response from one chunk
677  * before the next chunk can be submitted, we always wait for the response
678  * for the previous chunk before submitting the next chunk. Because
679  * requests are submitted in lock step like this, there is no need to
680  * synchronize access to request data structures.
681  *
682  * Return: -EINPROGRESS: request has been submitted to the SPU and the
683  *			 response will be returned asynchronously
684  *	   -EAGAIN: a non-final request included too little data to submit
685  *		    to the SPU; the data was stored (hash carry) to be
686  *		    submitted with the next part of the request
687  *	   other: an error code
688  */
689 static int handle_ahash_req(struct iproc_reqctx_s *rctx)
690 {
691 struct spu_hw *spu = &iproc_priv.spu;
692 struct crypto_async_request *areq = rctx->parent;
693 struct ahash_request *req = ahash_request_cast(areq);
694 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
695 struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
696 unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
697 struct iproc_ctx_s *ctx = rctx->ctx;
698
699
700 unsigned int nbytes_to_hash = 0;
701 int err = 0;
702 unsigned int chunksize = 0;
703
704
705
706
707 unsigned int new_data_len;
708
709 unsigned int __maybe_unused chunk_start = 0;
710 u32 db_size;
711 int pad_len = 0;
712 u32 data_pad_len = 0;
713 u32 stat_pad_len = 0;
714 struct brcm_message *mssg;
715 struct spu_request_opts req_opts;
716 struct spu_cipher_parms cipher_parms;
717 struct spu_hash_parms hash_parms;
718 struct spu_aead_parms aead_parms;
719 unsigned int local_nbuf;
720 u32 spu_hdr_len;
721 unsigned int digestsize;
722 u16 rem = 0;
723
724
725
726
727
728 u8 rx_frag_num = 3;
729 u8 tx_frag_num = 1;
730
731 flow_log("total_todo %u, total_sent %u\n",
732 rctx->total_todo, rctx->total_sent);
733
734 memset(&req_opts, 0, sizeof(req_opts));
735 memset(&cipher_parms, 0, sizeof(cipher_parms));
736 memset(&hash_parms, 0, sizeof(hash_parms));
737 memset(&aead_parms, 0, sizeof(aead_parms));
738
739 req_opts.bd_suppress = true;
740 hash_parms.alg = ctx->auth.alg;
741 hash_parms.mode = ctx->auth.mode;
742 hash_parms.type = HASH_TYPE_NONE;
743 hash_parms.key_buf = (u8 *)ctx->authkey;
744 hash_parms.key_len = ctx->authkeylen;
745
746
747
748
749
750
751
752
753 cipher_parms.type = ctx->cipher_type;
754
755 mssg = &rctx->mb_mssg;
756 chunk_start = rctx->src_sent;
757
758
759
760
761
762 nbytes_to_hash = rctx->total_todo - rctx->total_sent;
763 chunksize = nbytes_to_hash;
764 if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
765 (chunksize > ctx->max_payload))
766 chunksize = ctx->max_payload;
767
768
769
770
771
772
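/*
 * For a non-final request whose length is not a multiple of the block
 * size, park the leftover bytes in hash_carry and prepend them to the
 * next request instead of sending a partial block to the hw.
 */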
773 if (!rctx->is_final) {
774 u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
775 u16 new_len;
776
777 rem = chunksize % blocksize;
778 if (rem) {
779
780 chunksize -= rem;
781 if (chunksize == 0) {
782
783 new_len = rem - rctx->hash_carry_len;
784 sg_copy_part_to_buf(req->src, dest, new_len,
785 rctx->src_sent);
786 rctx->hash_carry_len = rem;
787 flow_log("Exiting with hash carry len: %u\n",
788 rctx->hash_carry_len);
789 packet_dump(" buf: ",
790 rctx->hash_carry,
791 rctx->hash_carry_len);
792 return -EAGAIN;
793 }
794 }
795 }
796
797
798 local_nbuf = rctx->hash_carry_len;
799 rctx->hash_carry_len = 0;
800 if (local_nbuf)
801 tx_frag_num++;
802 new_data_len = chunksize - local_nbuf;
803
804
805 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
806 new_data_len);
807
808
809 if (hash_parms.alg == HASH_ALG_AES)
810 hash_parms.type = (enum hash_type)cipher_parms.type;
811 else
812 hash_parms.type = spu->spu_hash_type(rctx->total_sent);
813
814 digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
815 hash_parms.type);
816 hash_parms.digestsize = digestsize;
817
818
819 rctx->total_sent += chunksize;
820
821 rctx->src_sent += new_data_len;
822
823 if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
824 hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
825 hash_parms.mode,
826 chunksize,
827 blocksize);
828
829
830
831
832
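/*
 * For a non-first chunk, pass the digest returned by the previous
 * chunk as the "key" so the hw can continue the incremental hash.
 */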
833 if ((hash_parms.type == HASH_TYPE_UPDT) &&
834 (hash_parms.alg != HASH_ALG_AES)) {
835 hash_parms.key_buf = rctx->incr_hash;
836 hash_parms.key_len = digestsize;
837 }
838
839 atomic64_add(chunksize, &iproc_priv.bytes_out);
840
841 flow_log("%s() final: %u nbuf: %u ",
842 __func__, rctx->is_final, local_nbuf);
843
844 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
845 flow_log("max_payload infinite\n");
846 else
847 flow_log("max_payload %u\n", ctx->max_payload);
848
849 flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);
850
851
852 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
853
854 hash_parms.prebuf_len = local_nbuf;
855 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
856 BCM_HDR_LEN,
857 &req_opts, &cipher_parms,
858 &hash_parms, &aead_parms,
859 new_data_len);
860
861 if (spu_hdr_len == 0) {
862 pr_err("Failed to create SPU request header\n");
863 return -EFAULT;
864 }
865
866
867
868
869
870 data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
871 db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
872 0, 0, hash_parms.pad_len);
873 if (spu->spu_tx_status_len())
874 stat_pad_len = spu->spu_wordalign_padlen(db_size);
875 if (stat_pad_len)
876 rx_frag_num++;
877 pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
878 if (pad_len) {
879 tx_frag_num++;
880 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
881 hash_parms.pad_len, ctx->auth.alg,
882 ctx->auth.mode, rctx->total_sent,
883 stat_pad_len);
884 }
885
886 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
887 spu_hdr_len);
888 packet_dump(" prebuf: ", rctx->hash_carry, local_nbuf);
889 flow_log("Data:\n");
890 dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
891 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
892
893
894
895
896
897 memset(mssg, 0, sizeof(*mssg));
898 mssg->type = BRCM_MESSAGE_SPU;
899 mssg->ctx = rctx;
900
901
902 err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
903 stat_pad_len);
904 if (err)
905 return err;
906
907
908 tx_frag_num += rctx->src_nents;
909 if (spu->spu_tx_status_len())
910 tx_frag_num++;
911 err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
912 local_nbuf, new_data_len, pad_len);
913 if (err)
914 return err;
915
916 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
917 if (unlikely(err < 0))
918 return err;
919
920 return -EINPROGRESS;
921 }
922
923 /**
924  * spu_hmac_outer_hash() - Compute the outer hash for an HMAC request in
925  * software. The digest from the inner hash, already in req->result, is
926  * hashed together with the stored opad block.
927  * @req:	hash request from the crypto API
928  * @ctx:	transformation context
929  *
930  * Return: 0 if successful
931  *	   < 0 otherwise
932  */
933 static int spu_hmac_outer_hash(struct ahash_request *req,
934 struct iproc_ctx_s *ctx)
935 {
936 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
937 unsigned int blocksize =
938 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
939 int rc;
940
941 switch (ctx->auth.alg) {
942 case HASH_ALG_MD5:
943 rc = do_shash("md5", req->result, ctx->opad, blocksize,
944 req->result, ctx->digestsize, NULL, 0);
945 break;
946 case HASH_ALG_SHA1:
947 rc = do_shash("sha1", req->result, ctx->opad, blocksize,
948 req->result, ctx->digestsize, NULL, 0);
949 break;
950 case HASH_ALG_SHA224:
951 rc = do_shash("sha224", req->result, ctx->opad, blocksize,
952 req->result, ctx->digestsize, NULL, 0);
953 break;
954 case HASH_ALG_SHA256:
955 rc = do_shash("sha256", req->result, ctx->opad, blocksize,
956 req->result, ctx->digestsize, NULL, 0);
957 break;
958 case HASH_ALG_SHA384:
959 rc = do_shash("sha384", req->result, ctx->opad, blocksize,
960 req->result, ctx->digestsize, NULL, 0);
961 break;
962 case HASH_ALG_SHA512:
963 rc = do_shash("sha512", req->result, ctx->opad, blocksize,
964 req->result, ctx->digestsize, NULL, 0);
965 break;
966 default:
967 pr_err("%s() Error : unknown hmac type\n", __func__);
968 rc = -EINVAL;
969 }
970 return rc;
971 }
972
973 /**
974  * ahash_req_done() - Process a hash result from the SPU hardware.
975  * @rctx: Crypto request context
976  *
977  * Return: 0 if successful
978  *	   < 0 if an error
979  */
980 static int ahash_req_done(struct iproc_reqctx_s *rctx)
981 {
982 struct spu_hw *spu = &iproc_priv.spu;
983 struct crypto_async_request *areq = rctx->parent;
984 struct ahash_request *req = ahash_request_cast(areq);
985 struct iproc_ctx_s *ctx = rctx->ctx;
986 int err;
987
988 memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
989
990 if (spu->spu_type == SPU_TYPE_SPUM) {
991
992
993
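/*
 * SPU-M returns the MD5 digest as big-endian words; byte-swap each of
 * the five words back to the expected byte order.
 */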
994 if (ctx->auth.alg == HASH_ALG_MD5) {
995 __swab32s((u32 *)req->result);
996 __swab32s(((u32 *)req->result) + 1);
997 __swab32s(((u32 *)req->result) + 2);
998 __swab32s(((u32 *)req->result) + 3);
999 __swab32s(((u32 *)req->result) + 4);
1000 }
1001 }
1002
1003 flow_dump(" digest ", req->result, ctx->digestsize);
1004
1005
1006 if (rctx->is_sw_hmac) {
1007 err = spu_hmac_outer_hash(req, ctx);
1008 if (err < 0)
1009 return err;
1010 flow_dump(" hmac: ", req->result, ctx->digestsize);
1011 }
1012
1013 if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
1014 atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
1015 atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
1016 } else {
1017 atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
1018 atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
1019 }
1020
1021 return 0;
1022 }
1023
1024 /**
1025  * handle_ahash_resp() - Process a SPU response message for a hash request.
1026  * Checks if the entire crypto API request has been processed, and if so,
1027  * finishes processing the request.
1028  * @rctx: Crypto request context
1029  */
1030 static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
1031 {
1032 struct iproc_ctx_s *ctx = rctx->ctx;
1033 #ifdef DEBUG
1034 struct crypto_async_request *areq = rctx->parent;
1035 struct ahash_request *req = ahash_request_cast(areq);
1036 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1037 unsigned int blocksize =
1038 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
1039 #endif
1040
1041
1042
1043
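/*
 * Save the hash returned by the hw so it can be fed back as the
 * starting state for the next chunk of an incremental hash. This may
 * copy more than needed, but that is simpler than computing the exact
 * length.
 */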
1044 memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
1045
1046 flow_log("%s() blocksize:%u digestsize:%u\n",
1047 __func__, blocksize, ctx->digestsize);
1048
1049 atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);
1050
1051 if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
1052 ahash_req_done(rctx);
1053 }
1054
1055 /**
1056  * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to
1057  * receive a SPU response message for an AEAD request. Includes buffers to
1058  * catch SPU message headers and the response data.
1059  * @mssg: mailbox message containing the receive sg
1060  * @req: AEAD request from the crypto API
1061  * @rctx: crypto request context
1062  * @rx_frag_num: number of scatterlist elements required to hold the
1063  *		SPU response message
1064  * @assoc_len: Length of associated data included in the crypto request
1065  * @ret_iv_len: Length of IV returned in response
1066  * @resp_len: Number of bytes of response data expected to be written to
1067  *		the dst buffer from the crypto API
1068  * @digestsize: Length of hash digest, in bytes
1069  * @stat_pad_len: Number of bytes required to pad the STAT field to
1070  *		a 4-byte boundary
1071  *
1072  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1073  * when the request completes, whether the request is handled successfully
1074  * or there is an error.
1075  *
1076  * Return: 0 if successful
1077  *	   < 0 if an error
1078  */
1079 static int spu_aead_rx_sg_create(struct brcm_message *mssg,
1080 struct aead_request *req,
1081 struct iproc_reqctx_s *rctx,
1082 u8 rx_frag_num,
1083 unsigned int assoc_len,
1084 u32 ret_iv_len, unsigned int resp_len,
1085 unsigned int digestsize, u32 stat_pad_len)
1086 {
1087 struct spu_hw *spu = &iproc_priv.spu;
1088 struct scatterlist *sg;
1089 struct iproc_ctx_s *ctx = rctx->ctx;
1090 u32 datalen;
1091 u32 assoc_buf_len;
1092 u8 data_padlen = 0;
1093
1094 if (ctx->is_rfc4543) {
1095
1096 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1097 assoc_len + resp_len);
1098 assoc_buf_len = assoc_len;
1099 } else {
1100 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1101 resp_len);
1102 assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
1103 assoc_len, ret_iv_len,
1104 rctx->is_encrypt);
1105 }
1106
1107 if (ctx->cipher.mode == CIPHER_MODE_CCM)
1108
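/* ICV (after data) must start in the next 32-bit word for CCM */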
1109 data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
1110 resp_len +
1111 data_padlen);
1112
1113 if (data_padlen)
1114
1115 rx_frag_num++;
1116
1117 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
1118 rctx->gfp);
1119 if (!mssg->spu.dst)
1120 return -ENOMEM;
1121
1122 sg = mssg->spu.dst;
1123 sg_init_table(sg, rx_frag_num);
1124
1125
1126 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1127
1128 if (assoc_buf_len) {
1129
1130
1131
1132
1133 memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1134 sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1135 }
1136
1137 if (resp_len) {
1138
1139
1140
1141
1142 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1143 rctx->dst_nents, resp_len);
1144 if (datalen < (resp_len)) {
1145 pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
1146 __func__, resp_len, datalen);
1147 return -EFAULT;
1148 }
1149 }
1150
1151
1152 if (data_padlen) {
1153 memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1154 sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1155 }
1156
1157
1158 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1159
1160 flow_log("stat_pad_len %u\n", stat_pad_len);
1161 if (stat_pad_len) {
1162 memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1163 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1164 }
1165
1166 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1167 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1168
1169 return 0;
1170 }
1171
1172
1173
1174 /**
1175  * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send
1176  * a SPU request message for an AEAD request. Includes SPU message headers
1177  * and the request data.
1178  * @mssg: mailbox message containing the transmit sg
1179  * @rctx: crypto request context
1180  * @tx_frag_num: number of scatterlist elements required to construct the
1181  *		SPU request message
1182  * @spu_hdr_len: length of SPU message header in bytes
1183  * @assoc: crypto API associated data scatterlist
1184  * @assoc_len: length of associated data
1185  * @assoc_nents: number of scatterlist entries containing assoc data
1186  * @aead_iv_len: length of AEAD IV, if included
1187  * @chunksize: Number of bytes of request data
1188  * @aad_pad_len: Number of bytes of padding at end of AAD
1189  * @pad_len: Number of pad bytes
1190  * @incl_icv: If true, write a separate ICV buffer after the data and
1191  *	      any padding
1192  *
1193  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1194  * when the request completes, whether the request is handled successfully
1195  * or there is an error.
1196  *
1197  * Return: 0 if successful, < 0 if an error
1198  */
1199 static int spu_aead_tx_sg_create(struct brcm_message *mssg,
1200 struct iproc_reqctx_s *rctx,
1201 u8 tx_frag_num,
1202 u32 spu_hdr_len,
1203 struct scatterlist *assoc,
1204 unsigned int assoc_len,
1205 int assoc_nents,
1206 unsigned int aead_iv_len,
1207 unsigned int chunksize,
1208 u32 aad_pad_len, u32 pad_len, bool incl_icv)
1209 {
1210 struct spu_hw *spu = &iproc_priv.spu;
1211 struct scatterlist *sg;
1212 struct scatterlist *assoc_sg = assoc;
1213 struct iproc_ctx_s *ctx = rctx->ctx;
1214 u32 datalen;
1215 u32 written;
1216 u32 assoc_offset = 0;
1217 u32 stat_len;
1218
1219 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
1220 rctx->gfp);
1221 if (!mssg->spu.src)
1222 return -ENOMEM;
1223
1224 sg = mssg->spu.src;
1225 sg_init_table(sg, tx_frag_num);
1226
1227 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1228 BCM_HDR_LEN + spu_hdr_len);
1229
1230 if (assoc_len) {
1231
1232 written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
1233 assoc_nents, assoc_len);
1234 if (written < assoc_len) {
1235 pr_err("%s(): failed to copy assoc sg to mbox msg",
1236 __func__);
1237 return -EFAULT;
1238 }
1239 }
1240
1241 if (aead_iv_len)
1242 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1243
1244 if (aad_pad_len) {
1245 memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1246 sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1247 }
1248
1249 datalen = chunksize;
1250 if ((chunksize > ctx->digestsize) && incl_icv)
1251 datalen -= ctx->digestsize;
1252 if (datalen) {
1253
1254 written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1255 rctx->src_nents, datalen);
1256 if (written < datalen) {
1257 pr_err("%s(): failed to copy src sg to mbox msg",
1258 __func__);
1259 return -EFAULT;
1260 }
1261 }
1262
1263 if (pad_len) {
1264 memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1265 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1266 }
1267
1268 if (incl_icv)
1269 sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1270
1271 stat_len = spu->spu_tx_status_len();
1272 if (stat_len) {
1273 memset(rctx->msg_buf.tx_stat, 0, stat_len);
1274 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
1275 }
1276 return 0;
1277 }
1278
1279
1280
1281 /**
1282  * handle_aead_req() - Submit a SPU request message for the next chunk of
1283  * the current AEAD request.
1284  * @rctx: Crypto request context
1285  *
1286  * Unlike other operation types, the length of an AEAD request is assumed
1287  * to fit in a single SPU request message; aead_enqueue() ensures this is
1288  * true. Comments for other op types regarding threads apply here as well.
1289  *
1290  * Return: -EINPROGRESS: request has been submitted to the SPU and the
1291  *			 response will be returned asynchronously
1292  *	   any other value indicates an error
1293  */
1294
1295
1296 static int handle_aead_req(struct iproc_reqctx_s *rctx)
1297 {
1298 struct spu_hw *spu = &iproc_priv.spu;
1299 struct crypto_async_request *areq = rctx->parent;
1300 struct aead_request *req = container_of(areq,
1301 struct aead_request, base);
1302 struct iproc_ctx_s *ctx = rctx->ctx;
1303 int err;
1304 unsigned int chunksize;
1305 unsigned int resp_len;
1306 u32 spu_hdr_len;
1307 u32 db_size;
1308 u32 stat_pad_len;
1309 u32 pad_len;
1310 struct brcm_message *mssg;
1311 struct spu_request_opts req_opts;
1312 struct spu_cipher_parms cipher_parms;
1313 struct spu_hash_parms hash_parms;
1314 struct spu_aead_parms aead_parms;
1315 int assoc_nents = 0;
1316 bool incl_icv = false;
1317 unsigned int digestsize = ctx->digestsize;
1318
1319
1320
1321 u8 rx_frag_num = 2;
1322 u8 tx_frag_num = 1;
1323
1324
1325 chunksize = rctx->total_todo;
1326
1327 flow_log("%s: chunksize %u\n", __func__, chunksize);
1328
1329 memset(&req_opts, 0, sizeof(req_opts));
1330 memset(&hash_parms, 0, sizeof(hash_parms));
1331 memset(&aead_parms, 0, sizeof(aead_parms));
1332
1333 req_opts.is_inbound = !(rctx->is_encrypt);
1334 req_opts.auth_first = ctx->auth_first;
1335 req_opts.is_aead = true;
1336 req_opts.is_esp = ctx->is_esp;
1337
1338 cipher_parms.alg = ctx->cipher.alg;
1339 cipher_parms.mode = ctx->cipher.mode;
1340 cipher_parms.type = ctx->cipher_type;
1341 cipher_parms.key_buf = ctx->enckey;
1342 cipher_parms.key_len = ctx->enckeylen;
1343 cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1344 cipher_parms.iv_len = rctx->iv_ctr_len;
1345
1346 hash_parms.alg = ctx->auth.alg;
1347 hash_parms.mode = ctx->auth.mode;
1348 hash_parms.type = HASH_TYPE_NONE;
1349 hash_parms.key_buf = (u8 *)ctx->authkey;
1350 hash_parms.key_len = ctx->authkeylen;
1351 hash_parms.digestsize = digestsize;
1352
1353 if ((ctx->auth.alg == HASH_ALG_SHA224) &&
1354 (ctx->authkeylen < SHA224_DIGEST_SIZE))
1355 hash_parms.key_len = SHA224_DIGEST_SIZE;
1356
1357 aead_parms.assoc_size = req->assoclen;
1358 if (ctx->is_esp && !ctx->is_rfc4543) {
1359
1360
1361
1362
1363
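/*
 * The 8-byte IV is included with the assoc data in the request. The
 * SPU expects AAD to include just the SPI and seqno, so subtract off
 * the IV length here.
 */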
1364 aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
1365
1366 if (rctx->is_encrypt) {
1367 aead_parms.return_iv = true;
1368 aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
1369 aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
1370 }
1371 } else {
1372 aead_parms.ret_iv_len = 0;
1373 }
1374
1375
1376
1377
1378
1379
1380
1381 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1382 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1383 if (aead_parms.assoc_size)
1384 assoc_nents = spu_sg_count(rctx->assoc, 0,
1385 aead_parms.assoc_size);
1386
1387 mssg = &rctx->mb_mssg;
1388
1389 rctx->total_sent = chunksize;
1390 rctx->src_sent = chunksize;
1391 if (spu->spu_assoc_resp_len(ctx->cipher.mode,
1392 aead_parms.assoc_size,
1393 aead_parms.ret_iv_len,
1394 rctx->is_encrypt))
1395 rx_frag_num++;
1396
1397 aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
1398 rctx->iv_ctr_len);
1399
1400 if (ctx->auth.alg == HASH_ALG_AES)
1401 hash_parms.type = (enum hash_type)ctx->cipher_type;
1402
1403
1404 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1405 aead_parms.assoc_size);
1406
1407
1408 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1409 chunksize);
1410
1411 if (ctx->cipher.mode == CIPHER_MODE_CCM) {
1412
1413
1414
1415
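/*
 * For CCM, AAD length + 2 (rather than just AAD length) must be
 * 128-bit aligned.
 */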
1416 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
1417 ctx->cipher.mode,
1418 aead_parms.assoc_size + 2);
1419
1420
1421
1422
1423
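/*
 * When decrypting, the input includes the ICV, but the hw pads only
 * the actual data, so compute the data pad length over chunksize
 * minus the digest size.
 */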
1424 if (!rctx->is_encrypt)
1425 aead_parms.data_pad_len =
1426 spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1427 chunksize - digestsize);
1428
1429
1430 spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
1431 chunksize, rctx->is_encrypt,
1432 ctx->is_esp);
1433 }
1434
1435 if (ctx->is_rfc4543) {
1436
1437
1438
1439
1440 aead_parms.aad_pad_len = 0;
1441 if (!rctx->is_encrypt)
1442 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1443 ctx->cipher.mode,
1444 aead_parms.assoc_size + chunksize -
1445 digestsize);
1446 else
1447 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1448 ctx->cipher.mode,
1449 aead_parms.assoc_size + chunksize);
1450
1451 req_opts.is_rfc4543 = true;
1452 }
1453
1454 if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1455 incl_icv = true;
1456 tx_frag_num++;
1457
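/* Copy the ICV from the end of the src scatterlist to the digest buf */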
1458 sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1459 req->assoclen + rctx->total_sent -
1460 digestsize);
1461 }
1462
1463 atomic64_add(chunksize, &iproc_priv.bytes_out);
1464
1465 flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
1466
1467
1468 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1469
1470 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1471 BCM_HDR_LEN, &req_opts,
1472 &cipher_parms, &hash_parms,
1473 &aead_parms, chunksize);
1474
1475
1476 db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
1477 chunksize, aead_parms.aad_pad_len,
1478 aead_parms.data_pad_len, 0);
1479
1480 stat_pad_len = spu->spu_wordalign_padlen(db_size);
1481
1482 if (stat_pad_len)
1483 rx_frag_num++;
1484 pad_len = aead_parms.data_pad_len + stat_pad_len;
1485 if (pad_len) {
1486 tx_frag_num++;
1487 spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1488 aead_parms.data_pad_len, 0,
1489 ctx->auth.alg, ctx->auth.mode,
1490 rctx->total_sent, stat_pad_len);
1491 }
1492
1493 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1494 spu_hdr_len);
1495 dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1496 packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1497 packet_log("BD:\n");
1498 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1499 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1500
1501
1502
1503
1504
1505 memset(mssg, 0, sizeof(*mssg));
1506 mssg->type = BRCM_MESSAGE_SPU;
1507 mssg->ctx = rctx;
1508
1509
1510 rx_frag_num += rctx->dst_nents;
1511 resp_len = chunksize;
1512
1513
1514
1515
1516
1517
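/* Always catch the ICV in a separate buffer */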
1518 rx_frag_num++;
1519
1520 if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
1521 (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1522
1523
1524
1525
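/*
 * Input is ciphertext plus ICV, but the ICV is not included in the
 * output; the hardware verifies it instead.
 */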
1526 resp_len -= ctx->digestsize;
1527 if (resp_len == 0)
1528
1529 rx_frag_num -= rctx->dst_nents;
1530 }
1531
1532 err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1533 aead_parms.assoc_size,
1534 aead_parms.ret_iv_len, resp_len, digestsize,
1535 stat_pad_len);
1536 if (err)
1537 return err;
1538
1539
1540 tx_frag_num += rctx->src_nents;
1541 tx_frag_num += assoc_nents;
1542 if (aead_parms.aad_pad_len)
1543 tx_frag_num++;
1544 if (aead_parms.iv_len)
1545 tx_frag_num++;
1546 if (spu->spu_tx_status_len())
1547 tx_frag_num++;
1548 err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1549 rctx->assoc, aead_parms.assoc_size,
1550 assoc_nents, aead_parms.iv_len, chunksize,
1551 aead_parms.aad_pad_len, pad_len, incl_icv);
1552 if (err)
1553 return err;
1554
1555 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
1556 if (unlikely(err < 0))
1557 return err;
1558
1559 return -EINPROGRESS;
1560 }
1561
1562 /**
1563  * handle_aead_resp() - Process a SPU response message for an AEAD request.
1564  * @rctx: Crypto request context
1565  */
1566 static void handle_aead_resp(struct iproc_reqctx_s *rctx)
1567 {
1568 struct spu_hw *spu = &iproc_priv.spu;
1569 struct crypto_async_request *areq = rctx->parent;
1570 struct aead_request *req = container_of(areq,
1571 struct aead_request, base);
1572 struct iproc_ctx_s *ctx = rctx->ctx;
1573 u32 payload_len;
1574 unsigned int icv_offset;
1575 u32 result_len;
1576
1577
1578 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
1579 flow_log("payload_len %u\n", payload_len);
1580
1581
1582 atomic64_add(payload_len, &iproc_priv.bytes_in);
1583
1584 if (req->assoclen)
1585 packet_dump(" assoc_data ", rctx->msg_buf.a.resp_aad,
1586 req->assoclen);
1587
1588
1589
1590
1591
1592
1593 result_len = req->cryptlen;
1594 if (rctx->is_encrypt) {
1595 icv_offset = req->assoclen + rctx->total_sent;
1596 packet_dump(" ICV: ", rctx->msg_buf.digest, ctx->digestsize);
1597 flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
1598 sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
1599 ctx->digestsize, icv_offset);
1600 result_len += ctx->digestsize;
1601 }
1602
1603 packet_log("response data: ");
1604 dump_sg(req->dst, req->assoclen, result_len);
1605
1606 atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
1607 if (ctx->cipher.alg == CIPHER_ALG_AES) {
1608 if (ctx->cipher.mode == CIPHER_MODE_CCM)
1609 atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
1610 else if (ctx->cipher.mode == CIPHER_MODE_GCM)
1611 atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
1612 else
1613 atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1614 } else {
1615 atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1616 }
1617 }
1618
1619
1620 /**
1621  * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request.
1622  * @rctx:	Crypto request context
1623  *
1624  * Free the scatterlists in the mailbox message and zero the message.
1625  */
1626 static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
1627 {
1628
1629 struct brcm_message *mssg = &rctx->mb_mssg;
1630
1631 kfree(mssg->spu.src);
1632 kfree(mssg->spu.dst);
1633 memset(mssg, 0, sizeof(struct brcm_message));
1634 }
1635
1636 /**
1637  * finish_req() - Used to invoke the complete callback from the requester
1638  * when a request has been handled asynchronously.
1639  * @rctx:  Request context
1640  * @err:   Indicates whether the request was successful or not
1641  *
1642  * Ensures that cleanup has been done for the request.
1643  */
1644 static void finish_req(struct iproc_reqctx_s *rctx, int err)
1645 {
1646 struct crypto_async_request *areq = rctx->parent;
1647
1648 flow_log("%s() err:%d\n\n", __func__, err);
1649
1650
1651 spu_chunk_cleanup(rctx);
1652
1653 if (areq)
1654 areq->complete(areq, err);
1655 }
1656
1657 /**
1658  * spu_rx_callback() - Callback from the mailbox framework with a SPU
1659  * response message.
1660  * @cl:		mailbox client structure for the SPU driver
1661  * @msg:	mailbox message containing the SPU response
1662  */
1662 static void spu_rx_callback(struct mbox_client *cl, void *msg)
1663 {
1664 struct spu_hw *spu = &iproc_priv.spu;
1665 struct brcm_message *mssg = msg;
1666 struct iproc_reqctx_s *rctx;
1667 int err = 0;
1668
1669 rctx = mssg->ctx;
1670 if (unlikely(!rctx)) {
1671
1672 pr_err("%s(): no request context", __func__);
1673 err = -EFAULT;
1674 goto cb_finish;
1675 }
1676
1677
1678 err = spu->spu_status_process(rctx->msg_buf.rx_stat);
1679 if (err != 0) {
1680 if (err == SPU_INVALID_ICV)
1681 atomic_inc(&iproc_priv.bad_icv);
1682 err = -EBADMSG;
1683 goto cb_finish;
1684 }
1685
1686
1687 switch (rctx->ctx->alg->type) {
1688 case CRYPTO_ALG_TYPE_ABLKCIPHER:
1689 handle_ablkcipher_resp(rctx);
1690 break;
1691 case CRYPTO_ALG_TYPE_AHASH:
1692 handle_ahash_resp(rctx);
1693 break;
1694 case CRYPTO_ALG_TYPE_AEAD:
1695 handle_aead_resp(rctx);
1696 break;
1697 default:
1698 err = -EINVAL;
1699 goto cb_finish;
1700 }
1701
1702
1703
1704
1705
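/*
 * If this response does not complete the request, then send the next
 * request chunk.
 */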
1706 if (rctx->total_sent < rctx->total_todo) {
1707
1708 spu_chunk_cleanup(rctx);
1709
1710 switch (rctx->ctx->alg->type) {
1711 case CRYPTO_ALG_TYPE_ABLKCIPHER:
1712 err = handle_ablkcipher_req(rctx);
1713 break;
1714 case CRYPTO_ALG_TYPE_AHASH:
1715 err = handle_ahash_req(rctx);
1716 if (err == -EAGAIN)
1717
1718
1719
1720
1721 err = 0;
1722 break;
1723 case CRYPTO_ALG_TYPE_AEAD:
1724 err = handle_aead_req(rctx);
1725 break;
1726 default:
1727 err = -EINVAL;
1728 }
1729
1730 if (err == -EINPROGRESS)
1731
1732 return;
1733 }
1734
1735 cb_finish:
1736 finish_req(rctx, err);
1737 }
1738
1739
1740
1741 /**
1742  * ablkcipher_enqueue() - Handle an ablkcipher encrypt or decrypt request.
1743  * @req:	Crypto API request
1744  * @encrypt:	true if encrypting; false if decrypting
1745  *
1746  * Return: -EINPROGRESS if request accepted and result will be returned
1747  *			asynchronously
1748  *	   < 0 if an error
1749  */
1750 static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
1751 {
1752 struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
1753 struct iproc_ctx_s *ctx =
1754 crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
1755 int err;
1756
1757 flow_log("%s() enc:%u\n", __func__, encrypt);
1758
1759 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1760 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1761 rctx->parent = &req->base;
1762 rctx->is_encrypt = encrypt;
1763 rctx->bd_suppress = false;
1764 rctx->total_todo = req->nbytes;
1765 rctx->src_sent = 0;
1766 rctx->total_sent = 0;
1767 rctx->total_received = 0;
1768 rctx->ctx = ctx;
1769
1770
1771 rctx->src_sg = req->src;
1772 rctx->src_nents = 0;
1773 rctx->src_skip = 0;
1774 rctx->dst_sg = req->dst;
1775 rctx->dst_nents = 0;
1776 rctx->dst_skip = 0;
1777
1778 if (ctx->cipher.mode == CIPHER_MODE_CBC ||
1779 ctx->cipher.mode == CIPHER_MODE_CTR ||
1780 ctx->cipher.mode == CIPHER_MODE_OFB ||
1781 ctx->cipher.mode == CIPHER_MODE_XTS ||
1782 ctx->cipher.mode == CIPHER_MODE_GCM ||
1783 ctx->cipher.mode == CIPHER_MODE_CCM) {
1784 rctx->iv_ctr_len =
1785 crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
1786 memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
1787 } else {
1788 rctx->iv_ctr_len = 0;
1789 }
1790
1791
1792 rctx->chan_idx = select_channel();
1793 err = handle_ablkcipher_req(rctx);
1794 if (err != -EINPROGRESS)
1795
1796 spu_chunk_cleanup(rctx);
1797
1798 return err;
1799 }
1800
1801 static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1802 unsigned int keylen)
1803 {
1804 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1805 int err;
1806
1807 err = verify_ablkcipher_des_key(cipher, key);
1808 if (err)
1809 return err;
1810
1811 ctx->cipher_type = CIPHER_TYPE_DES;
1812 return 0;
1813 }
1814
1815 static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1816 unsigned int keylen)
1817 {
1818 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1819 int err;
1820
1821 err = verify_ablkcipher_des3_key(cipher, key);
1822 if (err)
1823 return err;
1824
1825 ctx->cipher_type = CIPHER_TYPE_3DES;
1826 return 0;
1827 }
1828
1829 static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1830 unsigned int keylen)
1831 {
1832 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1833
1834 if (ctx->cipher.mode == CIPHER_MODE_XTS)
1835
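/* XTS includes two keys of equal length */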
1836 keylen = keylen / 2;
1837
1838 switch (keylen) {
1839 case AES_KEYSIZE_128:
1840 ctx->cipher_type = CIPHER_TYPE_AES128;
1841 break;
1842 case AES_KEYSIZE_192:
1843 ctx->cipher_type = CIPHER_TYPE_AES192;
1844 break;
1845 case AES_KEYSIZE_256:
1846 ctx->cipher_type = CIPHER_TYPE_AES256;
1847 break;
1848 default:
1849 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1850 return -EINVAL;
1851 }
1852 WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
1853 ((ctx->max_payload % AES_BLOCK_SIZE) != 0));
1854 return 0;
1855 }
1856
1857 static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1858 unsigned int keylen)
1859 {
1860 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1861 int i;
1862
1863 ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
1864
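/*
 * The key sent to the hw is the initial RC4 state: 4 bytes of i/j
 * state, then a 256-byte array filled by repeating the key.
 */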
1865 ctx->enckey[0] = 0x00;
1866 ctx->enckey[1] = 0x00;
1867 ctx->enckey[2] = 0x00;
1868 ctx->enckey[3] = 0x00;
1869 for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
1870 ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];
1871
1872 ctx->cipher_type = CIPHER_TYPE_INIT;
1873
1874 return 0;
1875 }
1876
1877 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1878 unsigned int keylen)
1879 {
1880 struct spu_hw *spu = &iproc_priv.spu;
1881 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1882 struct spu_cipher_parms cipher_parms;
1883 u32 alloc_len = 0;
1884 int err;
1885
1886 flow_log("ablkcipher_setkey() keylen: %d\n", keylen);
1887 flow_dump(" key: ", key, keylen);
1888
1889 switch (ctx->cipher.alg) {
1890 case CIPHER_ALG_DES:
1891 err = des_setkey(cipher, key, keylen);
1892 break;
1893 case CIPHER_ALG_3DES:
1894 err = threedes_setkey(cipher, key, keylen);
1895 break;
1896 case CIPHER_ALG_AES:
1897 err = aes_setkey(cipher, key, keylen);
1898 break;
1899 case CIPHER_ALG_RC4:
1900 err = rc4_setkey(cipher, key, keylen);
1901 break;
1902 default:
1903 pr_err("%s() Error: unknown cipher alg\n", __func__);
1904 err = -EINVAL;
1905 }
1906 if (err)
1907 return err;
1908
1909
1910 if (ctx->cipher.alg != CIPHER_ALG_RC4) {
1911 memcpy(ctx->enckey, key, keylen);
1912 ctx->enckeylen = keylen;
1913 }
1914
1915 if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
1916 (ctx->cipher.mode == CIPHER_MODE_XTS)) {
1917 unsigned int xts_keylen = keylen / 2;
1918
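/* The two XTS half-keys are stored in swapped order for the SPU hw */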
1919 memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
1920 memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
1921 }
1922
1923 if (spu->spu_type == SPU_TYPE_SPUM)
1924 alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
1925 else if (spu->spu_type == SPU_TYPE_SPU2)
1926 alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
1927 memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
1928 cipher_parms.iv_buf = NULL;
1929 cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
1930 flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
1931
1932 cipher_parms.alg = ctx->cipher.alg;
1933 cipher_parms.mode = ctx->cipher.mode;
1934 cipher_parms.type = ctx->cipher_type;
1935 cipher_parms.key_buf = ctx->enckey;
1936 cipher_parms.key_len = ctx->enckeylen;
1937
1938
1939 memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1940 ctx->spu_req_hdr_len =
1941 spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
1942 &cipher_parms);
1943
1944 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
1945 ctx->enckeylen,
1946 false);
1947
1948 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);
1949
1950 return 0;
1951 }
1952
1953 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1954 {
1955 flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);
1956
1957 return ablkcipher_enqueue(req, true);
1958 }
1959
1960 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1961 {
1962 flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
1963 return ablkcipher_enqueue(req, false);
1964 }
1965
1966 static int ahash_enqueue(struct ahash_request *req)
1967 {
1968 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1969 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1970 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
1971 int err = 0;
1972 const char *alg_name;
1973
1974 flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
1975
1976 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1977 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1978 rctx->parent = &req->base;
1979 rctx->ctx = ctx;
1980 rctx->bd_suppress = true;
1981 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
1982
1983
1984 rctx->src_sg = req->src;
1985 rctx->src_skip = 0;
1986 rctx->src_nents = 0;
1987 rctx->dst_sg = NULL;
1988 rctx->dst_skip = 0;
1989 rctx->dst_nents = 0;
1990
1991
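/*
 * SPU2 hardware does not compute a hash of zero-length data, so handle
 * a final zero-length request with a synchronous software hash.
 */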
1992 if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
1993 (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
1994 alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
1995 flow_log("Doing %sfinal %s zero-len hash request in software\n",
1996 rctx->is_final ? "" : "non-", alg_name);
1997 err = do_shash((unsigned char *)alg_name, req->result,
1998 NULL, 0, NULL, 0, ctx->authkey,
1999 ctx->authkeylen);
2000 if (err < 0)
2001 flow_log("Hash request failed with error %d\n", err);
2002 return err;
2003 }
2004
2005 rctx->chan_idx = select_channel();
2006
2007 err = handle_ahash_req(rctx);
2008 if (err != -EINPROGRESS)
2009
2010 spu_chunk_cleanup(rctx);
2011
2012 if (err == -EAGAIN)
2013
2014
2015
2016
2017 err = 0;
2018
2019 return err;
2020 }
2021
2022 static int __ahash_init(struct ahash_request *req)
2023 {
2024 struct spu_hw *spu = &iproc_priv.spu;
2025 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2026 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2027 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2028
2029 flow_log("%s()\n", __func__);
2030
2031
2032 rctx->hash_carry_len = 0;
2033 rctx->is_final = 0;
2034
2035 rctx->total_todo = 0;
2036 rctx->src_sent = 0;
2037 rctx->total_sent = 0;
2038 rctx->total_received = 0;
2039
2040 ctx->digestsize = crypto_ahash_digestsize(tfm);
2041
2042 WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);
2043
2044 rctx->is_sw_hmac = false;
2045
2046 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
2047 true);
2048
2049 return 0;
2050 }
2051
2052
2053 /**
2054  * spu_no_incr_hash() - Determine whether incremental hashing must be done
2055  * in software, because the hw cannot continue a partially computed hash.
2056  * @ctx:  Crypto session context
2057  *
2058  * SPU2 does not support incremental hashing; neither does SPU-M for the
2059  * AES-XCBC hash mode.
2060  *
2061  * Return: true if hashing must be done synchronously in software
2062  *	   false if the hardware can hash incrementally
2063  */
2064
2065 static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
2066 {
2067 struct spu_hw *spu = &iproc_priv.spu;
2068
2069 if (spu->spu_type == SPU_TYPE_SPU2)
2070 return true;
2071
2072 if ((ctx->auth.alg == HASH_ALG_AES) &&
2073 (ctx->auth.mode == HASH_MODE_XCBC))
2074 return true;
2075
2076
2077 return false;
2078 }
2079
2080 static int ahash_init(struct ahash_request *req)
2081 {
2082 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2083 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2084 const char *alg_name;
2085 struct crypto_shash *hash;
2086 int ret;
2087 gfp_t gfp;
2088
2089 if (spu_no_incr_hash(ctx)) {
2090
2091
2092
2093
2094
2095 alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2096 hash = crypto_alloc_shash(alg_name, 0, 0);
2097 if (IS_ERR(hash)) {
2098 ret = PTR_ERR(hash);
2099 goto err;
2100 }
2101
2102 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2103 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2104 ctx->shash = kmalloc(sizeof(*ctx->shash) +
2105 crypto_shash_descsize(hash), gfp);
2106 if (!ctx->shash) {
2107 ret = -ENOMEM;
2108 goto err_hash;
2109 }
2110 ctx->shash->tfm = hash;
2111
2112
2113 if (ctx->authkeylen > 0) {
2114 ret = crypto_shash_setkey(hash, ctx->authkey,
2115 ctx->authkeylen);
2116 if (ret)
2117 goto err_shash;
2118 }
2119
2120
2121 ret = crypto_shash_init(ctx->shash);
2122 if (ret)
2123 goto err_shash;
2124 } else {
2125
2126 ret = __ahash_init(req);
2127 }
2128
2129 return ret;
2130
2131 err_shash:
2132 kfree(ctx->shash);
2133 err_hash:
2134 crypto_free_shash(hash);
2135 err:
2136 return ret;
2137 }
2138
2139 static int __ahash_update(struct ahash_request *req)
2140 {
2141 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2142
2143 flow_log("ahash_update() nbytes:%u\n", req->nbytes);
2144
2145 if (!req->nbytes)
2146 return 0;
2147 rctx->total_todo += req->nbytes;
2148 rctx->src_sent = 0;
2149
2150 return ahash_enqueue(req);
2151 }
2152
2153 static int ahash_update(struct ahash_request *req)
2154 {
2155 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2156 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2157 u8 *tmpbuf;
2158 int ret;
2159 int nents;
2160 gfp_t gfp;
2161
2162 if (spu_no_incr_hash(ctx)) {
2163
2164
2165
2166
2167
2168 if (req->src)
2169 nents = sg_nents(req->src);
2170 else
2171 return -EINVAL;
2172
2173
2174 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2175 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2176 tmpbuf = kmalloc(req->nbytes, gfp);
2177 if (!tmpbuf)
2178 return -ENOMEM;
2179
2180 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2181 req->nbytes) {
2182 kfree(tmpbuf);
2183 return -EINVAL;
2184 }
2185
2186
2187 ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
2188 kfree(tmpbuf);
2189 } else {
2190
2191 ret = __ahash_update(req);
2192 }
2193
2194 return ret;
2195 }
2196
2197 static int __ahash_final(struct ahash_request *req)
2198 {
2199 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2200
2201 flow_log("ahash_final() nbytes:%u\n", req->nbytes);
2202
2203 rctx->is_final = 1;
2204
2205 return ahash_enqueue(req);
2206 }
2207
2208 static int ahash_final(struct ahash_request *req)
2209 {
2210 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2211 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2212 int ret;
2213
2214 if (spu_no_incr_hash(ctx)) {
2215
2216
2217
2218
2219
2220 ret = crypto_shash_final(ctx->shash, req->result);
2221
2222
2223 crypto_free_shash(ctx->shash->tfm);
2224 kfree(ctx->shash);
2225
2226 } else {
2227
2228 ret = __ahash_final(req);
2229 }
2230
2231 return ret;
2232 }
2233
2234 static int __ahash_finup(struct ahash_request *req)
2235 {
2236 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2237
2238 flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
2239
2240 rctx->total_todo += req->nbytes;
2241 rctx->src_sent = 0;
2242 rctx->is_final = 1;
2243
2244 return ahash_enqueue(req);
2245 }
2246
2247 static int ahash_finup(struct ahash_request *req)
2248 {
2249 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2250 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2251 u8 *tmpbuf;
2252 int ret;
2253 int nents;
2254 gfp_t gfp;
2255
2256 if (spu_no_incr_hash(ctx)) {
2257
2258
2259
2260
2261
2262 if (req->src) {
2263 nents = sg_nents(req->src);
2264 } else {
2265 ret = -EINVAL;
2266 goto ahash_finup_exit;
2267 }
2268
2269
2270 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2271 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2272 tmpbuf = kmalloc(req->nbytes, gfp);
2273 if (!tmpbuf) {
2274 ret = -ENOMEM;
2275 goto ahash_finup_exit;
2276 }
2277
2278 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2279 req->nbytes) {
2280 ret = -EINVAL;
2281 goto ahash_finup_free;
2282 }
2283
2284
2285 ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
2286 req->result);
2287 } else {
2288
2289 return __ahash_finup(req);
2290 }
2291 ahash_finup_free:
2292 kfree(tmpbuf);
2293
2294 ahash_finup_exit:
2295
2296 crypto_free_shash(ctx->shash->tfm);
2297 kfree(ctx->shash);
2298 return ret;
2299 }
2300
2301 static int ahash_digest(struct ahash_request *req)
2302 {
2303 int err = 0;
2304
2305 flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
2306
2307
2308 err = __ahash_init(req);
2309 if (!err)
2310 err = __ahash_finup(req);
2311
2312 return err;
2313 }
2314
2315 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2316 unsigned int keylen)
2317 {
2318 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2319
2320 flow_log("%s() ahash:%p key:%p keylen:%u\n",
2321 __func__, ahash, key, keylen);
2322 flow_dump(" key: ", key, keylen);
2323
2324 if (ctx->auth.alg == HASH_ALG_AES) {
2325 switch (keylen) {
2326 case AES_KEYSIZE_128:
2327 ctx->cipher_type = CIPHER_TYPE_AES128;
2328 break;
2329 case AES_KEYSIZE_192:
2330 ctx->cipher_type = CIPHER_TYPE_AES192;
2331 break;
2332 case AES_KEYSIZE_256:
2333 ctx->cipher_type = CIPHER_TYPE_AES256;
2334 break;
2335 default:
2336 pr_err("%s() Error: Invalid key length\n", __func__);
2337 return -EINVAL;
2338 }
2339 } else {
2340 pr_err("%s() Error: unknown hash alg\n", __func__);
2341 return -EINVAL;
2342 }
2343 memcpy(ctx->authkey, key, keylen);
2344 ctx->authkeylen = keylen;
2345
2346 return 0;
2347 }
2348
2349 static int ahash_export(struct ahash_request *req, void *out)
2350 {
2351 const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2352 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
2353
2354 spu_exp->total_todo = rctx->total_todo;
2355 spu_exp->total_sent = rctx->total_sent;
2356 spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2357 memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2358 spu_exp->hash_carry_len = rctx->hash_carry_len;
2359 memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2360
2361 return 0;
2362 }
2363
2364 static int ahash_import(struct ahash_request *req, const void *in)
2365 {
2366 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2367 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
2368
2369 rctx->total_todo = spu_exp->total_todo;
2370 rctx->total_sent = spu_exp->total_sent;
2371 rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2372 memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2373 rctx->hash_carry_len = spu_exp->hash_carry_len;
2374 memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
2375
2376 return 0;
2377 }
2378
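/*
 * ahash_hmac_setkey() - HMAC setkey. A key longer than the block size is
 * first hashed down to the digest size. On SPU-M, the ipad and opad blocks
 * are also precomputed here, since the outer hash is done in software.
 */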
2379 static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
2380 unsigned int keylen)
2381 {
2382 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2383 unsigned int blocksize =
2384 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2385 unsigned int digestsize = crypto_ahash_digestsize(ahash);
2386 unsigned int index;
2387 int rc;
2388
2389 flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2390 __func__, ahash, key, keylen, blocksize, digestsize);
2391 flow_dump(" key: ", key, keylen);
2392
2393 if (keylen > blocksize) {
2394 switch (ctx->auth.alg) {
2395 case HASH_ALG_MD5:
2396 rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2397 0, NULL, 0);
2398 break;
2399 case HASH_ALG_SHA1:
2400 rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2401 0, NULL, 0);
2402 break;
2403 case HASH_ALG_SHA224:
2404 rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2405 0, NULL, 0);
2406 break;
2407 case HASH_ALG_SHA256:
2408 rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2409 0, NULL, 0);
2410 break;
2411 case HASH_ALG_SHA384:
2412 rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2413 0, NULL, 0);
2414 break;
2415 case HASH_ALG_SHA512:
2416 rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2417 0, NULL, 0);
2418 break;
2419 case HASH_ALG_SHA3_224:
2420 rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2421 NULL, 0, NULL, 0);
2422 break;
2423 case HASH_ALG_SHA3_256:
2424 rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2425 NULL, 0, NULL, 0);
2426 break;
2427 case HASH_ALG_SHA3_384:
2428 rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2429 NULL, 0, NULL, 0);
2430 break;
2431 case HASH_ALG_SHA3_512:
2432 rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2433 NULL, 0, NULL, 0);
2434 break;
2435 default:
2436 pr_err("%s() Error: unknown hash alg\n", __func__);
2437 return -EINVAL;
2438 }
2439 if (rc < 0) {
2440 pr_err("%s() Error %d computing shash for %s\n",
2441 __func__, rc, hash_alg_name[ctx->auth.alg]);
2442 return rc;
2443 }
2444 ctx->authkeylen = digestsize;
2445
2446 flow_log(" keylen > digestsize... hashed\n");
2447 flow_dump(" newkey: ", ctx->authkey, ctx->authkeylen);
2448 } else {
2449 memcpy(ctx->authkey, key, keylen);
2450 ctx->authkeylen = keylen;
2451 }
2452
2453 /*
2454  * Full HMAC operation in SPU-M is not verified, so keep the
2455  * generation of ipad, opad, and the outer hash in software.
2456  * Precompute the xor'd ipad/opad blocks here from the auth key.
2457  */
2458 if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
2459 memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
2460 memset(ctx->ipad + ctx->authkeylen, 0,
2461 blocksize - ctx->authkeylen);
2462 ctx->authkeylen = 0;
2463 memcpy(ctx->opad, ctx->ipad, blocksize);
2464
2465 for (index = 0; index < blocksize; index++) {
2466 ctx->ipad[index] ^= HMAC_IPAD_VALUE;
2467 ctx->opad[index] ^= HMAC_OPAD_VALUE;
2468 }
2469
2470 flow_dump(" ipad: ", ctx->ipad, blocksize);
2471 flow_dump(" opad: ", ctx->opad, blocksize);
2472 }
2473 ctx->digestsize = digestsize;
2474 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
2475
2476 return 0;
2477 }
2478
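/*
 * ahash_hmac_init() - Init for an HMAC request. On hardware that hashes
 * incrementally (SPU-M), build the HMAC in software by seeding the hash
 * with the precomputed ipad block.
 */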
2479 static int ahash_hmac_init(struct ahash_request *req)
2480 {
2481 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2482 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2483 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2484 unsigned int blocksize =
2485 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2486
2487 flow_log("ahash_hmac_init()\n");
2488
2489 /* Init the context as a plain hash */
2490 ahash_init(req);
2491
2492 if (!spu_no_incr_hash(ctx)) {
2493 /* SPU-M can do incremental hashing but needs software for the outer HMAC */
2494 rctx->is_sw_hmac = true;
2495 ctx->auth.mode = HASH_MODE_HASH;
2496 /* Start with a prepended ipad */
2497 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2498 rctx->hash_carry_len = blocksize;
2499 rctx->total_todo += blocksize;
2500 }
2501
2502 return 0;
2503 }
2504
2505 static int ahash_hmac_update(struct ahash_request *req)
2506 {
2507 flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2508
2509 if (!req->nbytes)
2510 return 0;
2511
2512 return ahash_update(req);
2513 }
2514
2515 static int ahash_hmac_final(struct ahash_request *req)
2516 {
2517 flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2518
2519 return ahash_final(req);
2520 }
2521
2522 static int ahash_hmac_finup(struct ahash_request *req)
2523 {
2524 flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes);
2525
2526 return ahash_finup(req);
2527 }
2528
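/*
 * ahash_hmac_digest() - Compute a full HMAC in a single request. SPU2 does
 * the whole HMAC in hardware; SPU-M hashes ipad || data here and finishes
 * the outer hash in software.
 */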
2529 static int ahash_hmac_digest(struct ahash_request *req)
2530 {
2531 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2532 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2533 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2534 unsigned int blocksize =
2535 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2536
2537 flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2538
2539 /* Perform initialization and then call finup */
2540 __ahash_init(req);
2541
2542 if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
2543 /*
2544  * SPU2 supports the full HMAC implementation in
2545  * hardware, so there is no need to generate the
2546  * ipad, opad, and outer hash in software.
2547  * Only when the key is longer than the hash block
2548  * size does SPU2 expect the key to be hashed down
2549  * to digest size and fed in as the hash key.
2550  */
2551 rctx->is_sw_hmac = false;
2552 ctx->auth.mode = HASH_MODE_HMAC;
2553 } else {
2554 rctx->is_sw_hmac = true;
2555 ctx->auth.mode = HASH_MODE_HASH;
2556 /* Start with a prepended ipad */
2557 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2558 rctx->hash_carry_len = blocksize;
2559 rctx->total_todo += blocksize;
2560 }
2561
2562 return __ahash_finup(req);
2563 }
2564
2565
2566
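/*
 * aead_need_fallback() - Return true if this AEAD request must be handled
 * by the software fallback cipher rather than by the SPU hardware.
 */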
2567 static int aead_need_fallback(struct aead_request *req)
2568 {
2569 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2570 struct spu_hw *spu = &iproc_priv.spu;
2571 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2572 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2573 u32 payload_len;
2574
2575 /*
2576  * SPU hardware cannot handle the AES-GCM/CCM case where plaintext
2577  * and AAD are both 0 bytes long. So use fallback in this case.
2578  */
2579 if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
2580 (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
2581 (req->assoclen == 0)) {
2582 if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2583 (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2584 flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2585 return 1;
2586 }
2587 }
2588
2589 /* SPU-M only supports CCM digest sizes of 8, 12, or 16 bytes */
2590 if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2591 (spu->spu_type == SPU_TYPE_SPUM) &&
2592 (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
2593 (ctx->digestsize != 16)) {
2594 flow_log("%s() AES CCM needs fallback for digest size %d\n",
2595 __func__, ctx->digestsize);
2596 return 1;
2597 }
2598
2599 /*
2600  * SPU-M on NSP does not compute the AES-CCM hash correctly when
2601  * the AAD length is 0. So use fallback in this case.
2602  */
2603 if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2604 (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
2605 (req->assoclen == 0)) {
2606 flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2607 __func__);
2608 return 1;
2609 }
2610
2611 /*
2612  * RFC4106/RFC4543 GCM cannot handle an AAD length other than 16 or
2613  * 20 bytes. So use fallback in this case.
2614  */
2615 if (ctx->cipher.mode == CIPHER_MODE_GCM &&
2616 ctx->cipher.alg == CIPHER_ALG_AES &&
2617 rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
2618 req->assoclen != 16 && req->assoclen != 20) {
2619 flow_log("RFC4106/RFC4543 needs fallback for assoclen other than 16 or 20 bytes\n");
2621 return 1;
2622 }
2623
2624 payload_len = req->cryptlen;
2625 if (spu->spu_type == SPU_TYPE_SPUM)
2626 payload_len += req->assoclen;
2627
2628 flow_log("%s() payload len: %u\n", __func__, payload_len);
2629
2630 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2631 return 0;
2632 else
2633 return payload_len > ctx->max_payload;
2634 }
2635
2636 static void aead_complete(struct crypto_async_request *areq, int err)
2637 {
2638 struct aead_request *req =
2639 container_of(areq, struct aead_request, base);
2640 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2641 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2642
2643 flow_log("%s() err:%d\n", __func__, err);
2644
2645 areq->tfm = crypto_aead_tfm(aead);
2646
2647 areq->complete = rctx->old_complete;
2648 areq->data = rctx->old_data;
2649
2650 areq->complete(areq, err);
2651 }
2652
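/*
 * aead_do_fallback() - Submit an AEAD request to the software fallback
 * cipher, temporarily swapping in the fallback tfm and completion callback.
 */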
2653 static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
2654 {
2655 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2656 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
2657 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2658 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
2659 int err;
2660 u32 req_flags;
2661
2662 flow_log("%s() enc:%u\n", __func__, is_encrypt);
2663
2664 if (ctx->fallback_cipher) {
2665 /* Store the cipher tfm and then use the fallback tfm */
2666 rctx->old_tfm = tfm;
2667 aead_request_set_tfm(req, ctx->fallback_cipher);
2668
2669 /*
2670  * Save the callback and chain ourselves in, so we can restore the tfm
2671  */
2672 rctx->old_complete = req->base.complete;
2673 rctx->old_data = req->base.data;
2674 req_flags = aead_request_flags(req);
2675 aead_request_set_callback(req, req_flags, aead_complete, req);
2676 err = is_encrypt ? crypto_aead_encrypt(req) :
2677 crypto_aead_decrypt(req);
2678
2679 if (err == 0) {
2680 /*
2681  * The fallback completed synchronously (did not return
2682  * -EINPROGRESS), so restore the request state here.
2683  */
2684 aead_request_set_callback(req, req_flags,
2685 rctx->old_complete, req);
2686 req->base.data = rctx->old_data;
2687 aead_request_set_tfm(req, aead);
2688 flow_log("%s() fallback completed successfully\n\n",
2689 __func__);
2690 }
2691 } else {
2692 err = -EINVAL;
2693 }
2694
2695 return err;
2696 }
2697
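/*
 * aead_enqueue() - Fill in the request context for an AEAD operation and
 * submit the request to the SPU hardware, or to the fallback cipher if the
 * hardware cannot handle it.
 */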
2698 static int aead_enqueue(struct aead_request *req, bool is_encrypt)
2699 {
2700 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2701 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2702 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2703 int err;
2704
2705 flow_log("%s() enc:%u\n", __func__, is_encrypt);
2706
2707 if (req->assoclen > MAX_ASSOC_SIZE) {
2708 pr_err("%s() Error: associated data too long. (%u > %u bytes)\n",
2709 __func__, req->assoclen, MAX_ASSOC_SIZE);
2711 return -EINVAL;
2712 }
2713
2714 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2715 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2716 rctx->parent = &req->base;
2717 rctx->is_encrypt = is_encrypt;
2718 rctx->bd_suppress = false;
2719 rctx->total_todo = req->cryptlen;
2720 rctx->src_sent = 0;
2721 rctx->total_sent = 0;
2722 rctx->total_received = 0;
2723 rctx->is_sw_hmac = false;
2724 rctx->ctx = ctx;
2725 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2726
2727 /* Assoc data is at the start of the src sg */
2728 rctx->assoc = req->src;
2729
2730 /*
2731  * Init current position in the src scatterlist to be after the assoc
2732  * data. src_skip is set to the buffer offset where data begins, since
2733  * the assoc data could end in the middle of a buffer.
2734  */
2735 if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2736 &rctx->src_skip) < 0) {
2737 pr_err("%s() Error: Unable to find start of src data\n",
2738 __func__);
2739 return -EINVAL;
2740 }
2741
2742 rctx->src_nents = 0;
2743 rctx->dst_nents = 0;
2744 if (req->dst == req->src) {
2745 rctx->dst_sg = rctx->src_sg;
2746 rctx->dst_skip = rctx->src_skip;
2747 } else {
2748 /*
2749  * Expect req->dst to have room for assoc data followed by
2750  * output data and ICV, if encrypt. So initialize dst_sg
2751  * to point beyond the assoc len offset.
2752  */
2753 if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2754 &rctx->dst_skip) < 0) {
2755 pr_err("%s() Error: Unable to find start of dst data\n",
2756 __func__);
2757 return -EINVAL;
2758 }
2759 }
2760
2761 if (ctx->cipher.mode == CIPHER_MODE_CBC ||
2762 ctx->cipher.mode == CIPHER_MODE_CTR ||
2763 ctx->cipher.mode == CIPHER_MODE_OFB ||
2764 ctx->cipher.mode == CIPHER_MODE_XTS ||
2765 ctx->cipher.mode == CIPHER_MODE_GCM) {
2766 rctx->iv_ctr_len =
2767 ctx->salt_len +
2768 crypto_aead_ivsize(crypto_aead_reqtfm(req));
2769 } else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
2770 rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2771 } else {
2772 rctx->iv_ctr_len = 0;
2773 }
2774
2775 rctx->hash_carry_len = 0;
2776
2777 flow_log(" src sg: %p\n", req->src);
2778 flow_log(" rctx->src_sg: %p, src_skip %u\n",
2779 rctx->src_sg, rctx->src_skip);
2780 flow_log(" assoc: %p, assoclen %u\n", rctx->assoc, req->assoclen);
2781 flow_log(" dst sg: %p\n", req->dst);
2782 flow_log(" rctx->dst_sg: %p, dst_skip %u\n",
2783 rctx->dst_sg, rctx->dst_skip);
2784 flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len);
2785 flow_dump(" iv: ", req->iv, rctx->iv_ctr_len);
2786 flow_log(" authkeylen:%u\n", ctx->authkeylen);
2787 flow_log(" is_esp: %s\n", ctx->is_esp ? "yes" : "no");
2788
2789 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2790 flow_log(" max_payload infinite");
2791 else
2792 flow_log(" max_payload: %u\n", ctx->max_payload);
2793
2794 if (unlikely(aead_need_fallback(req)))
2795 return aead_do_fallback(req, is_encrypt);
2796
2797 /*
2798  * Do memory allocations for the request after the fallback check,
2799  * because if we do fallback, the allocations are unnecessary.
2800  */
2801 if (rctx->iv_ctr_len) {
2802 if (ctx->salt_len)
2803 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2804 ctx->salt, ctx->salt_len);
2805 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2806 req->iv,
2807 rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2808 }
2809
2810 rctx->chan_idx = select_channel();
2811 err = handle_aead_req(rctx);
2812 if (err != -EINPROGRESS)
2813 /* Synchronous result: clean up the chunk before returning */
2814 spu_chunk_cleanup(rctx);
2815
2816 return err;
2817 }
2818
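/*
 * aead_authenc_setkey() - setkey for authenc(hmac(...),cbc(...)) algorithms.
 * Splits the combined key into cipher and auth keys, then keys the fallback
 * cipher as well so it is ready if needed.
 */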
2819 static int aead_authenc_setkey(struct crypto_aead *cipher,
2820 const u8 *key, unsigned int keylen)
2821 {
2822 struct spu_hw *spu = &iproc_priv.spu;
2823 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2824 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2825 struct crypto_authenc_keys keys;
2826 int ret;
2827
2828 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2829 keylen);
2830 flow_dump(" key: ", key, keylen);
2831
2832 ret = crypto_authenc_extractkeys(&keys, key, keylen);
2833 if (ret)
2834 goto badkey;
2835
2836 if (keys.enckeylen > MAX_KEY_SIZE ||
2837 keys.authkeylen > MAX_KEY_SIZE)
2838 goto badkey;
2839
2840 ctx->enckeylen = keys.enckeylen;
2841 ctx->authkeylen = keys.authkeylen;
2842
2843 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
2844 /* The auth key may end up padded, so make sure it is zeroed first */
2845 memset(ctx->authkey, 0, sizeof(ctx->authkey));
2846 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
2847
2848 switch (ctx->alg->cipher_info.alg) {
2849 case CIPHER_ALG_DES:
2850 if (verify_aead_des_key(cipher, keys.enckey, keys.enckeylen))
2851 return -EINVAL;
2852
2853 ctx->cipher_type = CIPHER_TYPE_DES;
2854 break;
2855 case CIPHER_ALG_3DES:
2856 if (verify_aead_des3_key(cipher, keys.enckey, keys.enckeylen))
2857 return -EINVAL;
2858
2859 ctx->cipher_type = CIPHER_TYPE_3DES;
2860 break;
2861 case CIPHER_ALG_AES:
2862 switch (ctx->enckeylen) {
2863 case AES_KEYSIZE_128:
2864 ctx->cipher_type = CIPHER_TYPE_AES128;
2865 break;
2866 case AES_KEYSIZE_192:
2867 ctx->cipher_type = CIPHER_TYPE_AES192;
2868 break;
2869 case AES_KEYSIZE_256:
2870 ctx->cipher_type = CIPHER_TYPE_AES256;
2871 break;
2872 default:
2873 goto badkey;
2874 }
2875 break;
2876 case CIPHER_ALG_RC4:
2877 ctx->cipher_type = CIPHER_TYPE_INIT;
2878 break;
2879 default:
2880 pr_err("%s() Error: Unknown cipher alg\n", __func__);
2881 return -EINVAL;
2882 }
2883
2884 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2885 ctx->authkeylen);
2886 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2887 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2888
2889 /* Setkey the fallback cipher in case we need to use it */
2890 if (ctx->fallback_cipher) {
2891 flow_log(" running fallback setkey()\n");
2892
2893 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2894 ctx->fallback_cipher->base.crt_flags |=
2895 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2896 ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2897 if (ret) {
2898 flow_log(" fallback setkey() returned:%d\n", ret);
2899 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
2900 tfm->crt_flags |=
2901 (ctx->fallback_cipher->base.crt_flags &
2902 CRYPTO_TFM_RES_MASK);
2903 }
2904 }
2905
2906 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2907 ctx->enckeylen,
2908 false);
2909
2910 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2911
2912 return ret;
2913
2914 badkey:
2915 ctx->enckeylen = 0;
2916 ctx->authkeylen = 0;
2917 ctx->digestsize = 0;
2918
2919 crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2920 return -EINVAL;
2921 }
2922
2923 static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
2924 const u8 *key, unsigned int keylen)
2925 {
2926 struct spu_hw *spu = &iproc_priv.spu;
2927 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2928 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2929
2930 int ret = 0;
2931
2932 flow_log("%s() keylen:%u\n", __func__, keylen);
2933 flow_dump(" key: ", key, keylen);
2934
2935 if (!ctx->is_esp)
2936 ctx->digestsize = keylen;
2937
2938 ctx->enckeylen = keylen;
2939 ctx->authkeylen = 0;
2940 memcpy(ctx->enckey, key, ctx->enckeylen);
2941
2942 switch (ctx->enckeylen) {
2943 case AES_KEYSIZE_128:
2944 ctx->cipher_type = CIPHER_TYPE_AES128;
2945 break;
2946 case AES_KEYSIZE_192:
2947 ctx->cipher_type = CIPHER_TYPE_AES192;
2948 break;
2949 case AES_KEYSIZE_256:
2950 ctx->cipher_type = CIPHER_TYPE_AES256;
2951 break;
2952 default:
2953 goto badkey;
2954 }
2955
2956 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2957 ctx->authkeylen);
2958 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2959 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2960
2961 /* Setkey the fallback cipher in case we need to use it */
2962 if (ctx->fallback_cipher) {
2963 flow_log(" running fallback setkey()\n");
2964
2965 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2966 ctx->fallback_cipher->base.crt_flags |=
2967 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2968 ret = crypto_aead_setkey(ctx->fallback_cipher, key,
2969 keylen + ctx->salt_len);
2970 if (ret) {
2971 flow_log(" fallback setkey() returned:%d\n", ret);
2972 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
2973 tfm->crt_flags |=
2974 (ctx->fallback_cipher->base.crt_flags &
2975 CRYPTO_TFM_RES_MASK);
2976 }
2977 }
2978
2979 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2980 ctx->enckeylen,
2981 false);
2982
2983 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2984
2985 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2986 ctx->authkeylen);
2987
2988 return ret;
2989
2990 badkey:
2991 ctx->enckeylen = 0;
2992 ctx->authkeylen = 0;
2993 ctx->digestsize = 0;
2994
2995 crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2996 return -EINVAL;
2997 }
2998
2999 /**
3000  * aead_gcm_esp_setkey() - setkey() operation for the ESP variant of GCM AES.
3001  * @cipher: AEAD structure
3002  * @key:    Key followed by 4 bytes of salt
3003  * @keylen: Length of key plus salt, in bytes
3004  *
3005  * Extracts the salt from the key and stores it to be prepended to the IV on
3006  * each request. Digest is always 16 bytes.
3007  *
3008  * Return: Value from generic GCM/CCM setkey.
3009  */
3010 static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
3011 const u8 *key, unsigned int keylen)
3012 {
3013 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3014
3015 flow_log("%s\n", __func__);
3016 ctx->salt_len = GCM_ESP_SALT_SIZE;
3017 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3018 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3019 keylen -= GCM_ESP_SALT_SIZE;
3020 ctx->digestsize = GCM_ESP_DIGESTSIZE;
3021 ctx->is_esp = true;
3022 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3023
3024 return aead_gcm_ccm_setkey(cipher, key, keylen);
3025 }
3026
3027 /**
3028  * rfc4543_gcm_esp_setkey() - setkey operation for the RFC4543 GMAC variant.
3029  * @cipher: AEAD structure
3030  * @key:    Key followed by 4 bytes of salt
3031  * @keylen: Length of key plus salt, in bytes
3032  *
3033  * Extracts the salt from the key, stores it to be prepended to the IV on
3034  * each request, and marks the context as RFC4543. Digest is always 16 bytes.
3035  *
3036  * Return: Value from generic GCM/CCM setkey.
3037  */
3038 static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
3039 const u8 *key, unsigned int keylen)
3040 {
3041 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3042
3043 flow_log("%s\n", __func__);
3044 ctx->salt_len = GCM_ESP_SALT_SIZE;
3045 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3046 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3047 keylen -= GCM_ESP_SALT_SIZE;
3048 ctx->digestsize = GCM_ESP_DIGESTSIZE;
3049 ctx->is_esp = true;
3050 ctx->is_rfc4543 = true;
3051 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3052
3053 return aead_gcm_ccm_setkey(cipher, key, keylen);
3054 }
3055
3056 /**
3057  * aead_ccm_esp_setkey() - setkey() operation for the ESP variant of CCM AES.
3058  * @cipher: AEAD structure
3059  * @key:    Key followed by 3 bytes of salt
3060  * @keylen: Length of key plus salt, in bytes
3061  *
3062  * Extracts the salt from the key and stores it to be prepended to the IV on
3063  * each request.
3064  *
3065  * Return: Value from generic GCM/CCM setkey.
3066  */
3067 static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
3068 const u8 *key, unsigned int keylen)
3069 {
3070 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3071
3072 flow_log("%s\n", __func__);
3073 ctx->salt_len = CCM_ESP_SALT_SIZE;
3074 ctx->salt_offset = CCM_ESP_SALT_OFFSET;
3075 memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
3076 keylen -= CCM_ESP_SALT_SIZE;
3077 ctx->is_esp = true;
3078 flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
3079
3080 return aead_gcm_ccm_setkey(cipher, key, keylen);
3081 }
3082
3083 static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
3084 {
3085 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3086 int ret = 0;
3087
3088 flow_log("%s() authkeylen:%u authsize:%u\n",
3089 __func__, ctx->authkeylen, authsize);
3090
3091 ctx->digestsize = authsize;
3092
3093 /* Set the authsize on the fallback cipher in case we need to use it */
3094 if (ctx->fallback_cipher) {
3095 flow_log(" running fallback setauth()\n");
3096
3097 ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
3098 if (ret)
3099 flow_log(" fallback setauth() returned:%d\n", ret);
3100 }
3101
3102 return ret;
3103 }
3104
3105 static int aead_encrypt(struct aead_request *req)
3106 {
3107 flow_log("%s() cryptlen:%u (0x%08x)\n", __func__, req->cryptlen,
3108 req->cryptlen);
3109 dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3110 flow_log(" assoc_len:%u\n", req->assoclen);
3111
3112 return aead_enqueue(req, true);
3113 }
3114
3115 static int aead_decrypt(struct aead_request *req)
3116 {
3117 flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3118 dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3119 flow_log(" assoc_len:%u\n", req->assoclen);
3120
3121 return aead_enqueue(req, false);
3122 }
3123
3124
3125 /* Crypto algorithms exported to the kernel crypto API by this driver */
3126 static struct iproc_alg_s driver_algs[] = {
3127 {
3128 .type = CRYPTO_ALG_TYPE_AEAD,
3129 .alg.aead = {
3130 .base = {
3131 .cra_name = "gcm(aes)",
3132 .cra_driver_name = "gcm-aes-iproc",
3133 .cra_blocksize = AES_BLOCK_SIZE,
3134 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3135 },
3136 .setkey = aead_gcm_ccm_setkey,
3137 .ivsize = GCM_AES_IV_SIZE,
3138 .maxauthsize = AES_BLOCK_SIZE,
3139 },
3140 .cipher_info = {
3141 .alg = CIPHER_ALG_AES,
3142 .mode = CIPHER_MODE_GCM,
3143 },
3144 .auth_info = {
3145 .alg = HASH_ALG_AES,
3146 .mode = HASH_MODE_GCM,
3147 },
3148 .auth_first = 0,
3149 },
3150 {
3151 .type = CRYPTO_ALG_TYPE_AEAD,
3152 .alg.aead = {
3153 .base = {
3154 .cra_name = "ccm(aes)",
3155 .cra_driver_name = "ccm-aes-iproc",
3156 .cra_blocksize = AES_BLOCK_SIZE,
3157 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3158 },
3159 .setkey = aead_gcm_ccm_setkey,
3160 .ivsize = CCM_AES_IV_SIZE,
3161 .maxauthsize = AES_BLOCK_SIZE,
3162 },
3163 .cipher_info = {
3164 .alg = CIPHER_ALG_AES,
3165 .mode = CIPHER_MODE_CCM,
3166 },
3167 .auth_info = {
3168 .alg = HASH_ALG_AES,
3169 .mode = HASH_MODE_CCM,
3170 },
3171 .auth_first = 0,
3172 },
3173 {
3174 .type = CRYPTO_ALG_TYPE_AEAD,
3175 .alg.aead = {
3176 .base = {
3177 .cra_name = "rfc4106(gcm(aes))",
3178 .cra_driver_name = "gcm-aes-esp-iproc",
3179 .cra_blocksize = AES_BLOCK_SIZE,
3180 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3181 },
3182 .setkey = aead_gcm_esp_setkey,
3183 .ivsize = GCM_RFC4106_IV_SIZE,
3184 .maxauthsize = AES_BLOCK_SIZE,
3185 },
3186 .cipher_info = {
3187 .alg = CIPHER_ALG_AES,
3188 .mode = CIPHER_MODE_GCM,
3189 },
3190 .auth_info = {
3191 .alg = HASH_ALG_AES,
3192 .mode = HASH_MODE_GCM,
3193 },
3194 .auth_first = 0,
3195 },
3196 {
3197 .type = CRYPTO_ALG_TYPE_AEAD,
3198 .alg.aead = {
3199 .base = {
3200 .cra_name = "rfc4309(ccm(aes))",
3201 .cra_driver_name = "ccm-aes-esp-iproc",
3202 .cra_blocksize = AES_BLOCK_SIZE,
3203 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3204 },
3205 .setkey = aead_ccm_esp_setkey,
3206 .ivsize = CCM_AES_IV_SIZE,
3207 .maxauthsize = AES_BLOCK_SIZE,
3208 },
3209 .cipher_info = {
3210 .alg = CIPHER_ALG_AES,
3211 .mode = CIPHER_MODE_CCM,
3212 },
3213 .auth_info = {
3214 .alg = HASH_ALG_AES,
3215 .mode = HASH_MODE_CCM,
3216 },
3217 .auth_first = 0,
3218 },
3219 {
3220 .type = CRYPTO_ALG_TYPE_AEAD,
3221 .alg.aead = {
3222 .base = {
3223 .cra_name = "rfc4543(gcm(aes))",
3224 .cra_driver_name = "gmac-aes-esp-iproc",
3225 .cra_blocksize = AES_BLOCK_SIZE,
3226 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3227 },
3228 .setkey = rfc4543_gcm_esp_setkey,
3229 .ivsize = GCM_RFC4106_IV_SIZE,
3230 .maxauthsize = AES_BLOCK_SIZE,
3231 },
3232 .cipher_info = {
3233 .alg = CIPHER_ALG_AES,
3234 .mode = CIPHER_MODE_GCM,
3235 },
3236 .auth_info = {
3237 .alg = HASH_ALG_AES,
3238 .mode = HASH_MODE_GCM,
3239 },
3240 .auth_first = 0,
3241 },
3242 {
3243 .type = CRYPTO_ALG_TYPE_AEAD,
3244 .alg.aead = {
3245 .base = {
3246 .cra_name = "authenc(hmac(md5),cbc(aes))",
3247 .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3248 .cra_blocksize = AES_BLOCK_SIZE,
3249 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3250 },
3251 .setkey = aead_authenc_setkey,
3252 .ivsize = AES_BLOCK_SIZE,
3253 .maxauthsize = MD5_DIGEST_SIZE,
3254 },
3255 .cipher_info = {
3256 .alg = CIPHER_ALG_AES,
3257 .mode = CIPHER_MODE_CBC,
3258 },
3259 .auth_info = {
3260 .alg = HASH_ALG_MD5,
3261 .mode = HASH_MODE_HMAC,
3262 },
3263 .auth_first = 0,
3264 },
3265 {
3266 .type = CRYPTO_ALG_TYPE_AEAD,
3267 .alg.aead = {
3268 .base = {
3269 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3270 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3271 .cra_blocksize = AES_BLOCK_SIZE,
3272 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3273 },
3274 .setkey = aead_authenc_setkey,
3275 .ivsize = AES_BLOCK_SIZE,
3276 .maxauthsize = SHA1_DIGEST_SIZE,
3277 },
3278 .cipher_info = {
3279 .alg = CIPHER_ALG_AES,
3280 .mode = CIPHER_MODE_CBC,
3281 },
3282 .auth_info = {
3283 .alg = HASH_ALG_SHA1,
3284 .mode = HASH_MODE_HMAC,
3285 },
3286 .auth_first = 0,
3287 },
3288 {
3289 .type = CRYPTO_ALG_TYPE_AEAD,
3290 .alg.aead = {
3291 .base = {
3292 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3293 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3294 .cra_blocksize = AES_BLOCK_SIZE,
3295 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3296 },
3297 .setkey = aead_authenc_setkey,
3298 .ivsize = AES_BLOCK_SIZE,
3299 .maxauthsize = SHA256_DIGEST_SIZE,
3300 },
3301 .cipher_info = {
3302 .alg = CIPHER_ALG_AES,
3303 .mode = CIPHER_MODE_CBC,
3304 },
3305 .auth_info = {
3306 .alg = HASH_ALG_SHA256,
3307 .mode = HASH_MODE_HMAC,
3308 },
3309 .auth_first = 0,
3310 },
3311 {
3312 .type = CRYPTO_ALG_TYPE_AEAD,
3313 .alg.aead = {
3314 .base = {
3315 .cra_name = "authenc(hmac(md5),cbc(des))",
3316 .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3317 .cra_blocksize = DES_BLOCK_SIZE,
3318 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3319 },
3320 .setkey = aead_authenc_setkey,
3321 .ivsize = DES_BLOCK_SIZE,
3322 .maxauthsize = MD5_DIGEST_SIZE,
3323 },
3324 .cipher_info = {
3325 .alg = CIPHER_ALG_DES,
3326 .mode = CIPHER_MODE_CBC,
3327 },
3328 .auth_info = {
3329 .alg = HASH_ALG_MD5,
3330 .mode = HASH_MODE_HMAC,
3331 },
3332 .auth_first = 0,
3333 },
3334 {
3335 .type = CRYPTO_ALG_TYPE_AEAD,
3336 .alg.aead = {
3337 .base = {
3338 .cra_name = "authenc(hmac(sha1),cbc(des))",
3339 .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3340 .cra_blocksize = DES_BLOCK_SIZE,
3341 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3342 },
3343 .setkey = aead_authenc_setkey,
3344 .ivsize = DES_BLOCK_SIZE,
3345 .maxauthsize = SHA1_DIGEST_SIZE,
3346 },
3347 .cipher_info = {
3348 .alg = CIPHER_ALG_DES,
3349 .mode = CIPHER_MODE_CBC,
3350 },
3351 .auth_info = {
3352 .alg = HASH_ALG_SHA1,
3353 .mode = HASH_MODE_HMAC,
3354 },
3355 .auth_first = 0,
3356 },
3357 {
3358 .type = CRYPTO_ALG_TYPE_AEAD,
3359 .alg.aead = {
3360 .base = {
3361 .cra_name = "authenc(hmac(sha224),cbc(des))",
3362 .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3363 .cra_blocksize = DES_BLOCK_SIZE,
3364 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3365 },
3366 .setkey = aead_authenc_setkey,
3367 .ivsize = DES_BLOCK_SIZE,
3368 .maxauthsize = SHA224_DIGEST_SIZE,
3369 },
3370 .cipher_info = {
3371 .alg = CIPHER_ALG_DES,
3372 .mode = CIPHER_MODE_CBC,
3373 },
3374 .auth_info = {
3375 .alg = HASH_ALG_SHA224,
3376 .mode = HASH_MODE_HMAC,
3377 },
3378 .auth_first = 0,
3379 },
3380 {
3381 .type = CRYPTO_ALG_TYPE_AEAD,
3382 .alg.aead = {
3383 .base = {
3384 .cra_name = "authenc(hmac(sha256),cbc(des))",
3385 .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3386 .cra_blocksize = DES_BLOCK_SIZE,
3387 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3388 },
3389 .setkey = aead_authenc_setkey,
3390 .ivsize = DES_BLOCK_SIZE,
3391 .maxauthsize = SHA256_DIGEST_SIZE,
3392 },
3393 .cipher_info = {
3394 .alg = CIPHER_ALG_DES,
3395 .mode = CIPHER_MODE_CBC,
3396 },
3397 .auth_info = {
3398 .alg = HASH_ALG_SHA256,
3399 .mode = HASH_MODE_HMAC,
3400 },
3401 .auth_first = 0,
3402 },
3403 {
3404 .type = CRYPTO_ALG_TYPE_AEAD,
3405 .alg.aead = {
3406 .base = {
3407 .cra_name = "authenc(hmac(sha384),cbc(des))",
3408 .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3409 .cra_blocksize = DES_BLOCK_SIZE,
3410 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3411 },
3412 .setkey = aead_authenc_setkey,
3413 .ivsize = DES_BLOCK_SIZE,
3414 .maxauthsize = SHA384_DIGEST_SIZE,
3415 },
3416 .cipher_info = {
3417 .alg = CIPHER_ALG_DES,
3418 .mode = CIPHER_MODE_CBC,
3419 },
3420 .auth_info = {
3421 .alg = HASH_ALG_SHA384,
3422 .mode = HASH_MODE_HMAC,
3423 },
3424 .auth_first = 0,
3425 },
3426 {
3427 .type = CRYPTO_ALG_TYPE_AEAD,
3428 .alg.aead = {
3429 .base = {
3430 .cra_name = "authenc(hmac(sha512),cbc(des))",
3431 .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3432 .cra_blocksize = DES_BLOCK_SIZE,
3433 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3434 },
3435 .setkey = aead_authenc_setkey,
3436 .ivsize = DES_BLOCK_SIZE,
3437 .maxauthsize = SHA512_DIGEST_SIZE,
3438 },
3439 .cipher_info = {
3440 .alg = CIPHER_ALG_DES,
3441 .mode = CIPHER_MODE_CBC,
3442 },
3443 .auth_info = {
3444 .alg = HASH_ALG_SHA512,
3445 .mode = HASH_MODE_HMAC,
3446 },
3447 .auth_first = 0,
3448 },
3449 {
3450 .type = CRYPTO_ALG_TYPE_AEAD,
3451 .alg.aead = {
3452 .base = {
3453 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3454 .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3455 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3456 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3457 },
3458 .setkey = aead_authenc_setkey,
3459 .ivsize = DES3_EDE_BLOCK_SIZE,
3460 .maxauthsize = MD5_DIGEST_SIZE,
3461 },
3462 .cipher_info = {
3463 .alg = CIPHER_ALG_3DES,
3464 .mode = CIPHER_MODE_CBC,
3465 },
3466 .auth_info = {
3467 .alg = HASH_ALG_MD5,
3468 .mode = HASH_MODE_HMAC,
3469 },
3470 .auth_first = 0,
3471 },
3472 {
3473 .type = CRYPTO_ALG_TYPE_AEAD,
3474 .alg.aead = {
3475 .base = {
3476 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3477 .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3478 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3479 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3480 },
3481 .setkey = aead_authenc_setkey,
3482 .ivsize = DES3_EDE_BLOCK_SIZE,
3483 .maxauthsize = SHA1_DIGEST_SIZE,
3484 },
3485 .cipher_info = {
3486 .alg = CIPHER_ALG_3DES,
3487 .mode = CIPHER_MODE_CBC,
3488 },
3489 .auth_info = {
3490 .alg = HASH_ALG_SHA1,
3491 .mode = HASH_MODE_HMAC,
3492 },
3493 .auth_first = 0,
3494 },
3495 {
3496 .type = CRYPTO_ALG_TYPE_AEAD,
3497 .alg.aead = {
3498 .base = {
3499 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3500 .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3501 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3502 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3503 },
3504 .setkey = aead_authenc_setkey,
3505 .ivsize = DES3_EDE_BLOCK_SIZE,
3506 .maxauthsize = SHA224_DIGEST_SIZE,
3507 },
3508 .cipher_info = {
3509 .alg = CIPHER_ALG_3DES,
3510 .mode = CIPHER_MODE_CBC,
3511 },
3512 .auth_info = {
3513 .alg = HASH_ALG_SHA224,
3514 .mode = HASH_MODE_HMAC,
3515 },
3516 .auth_first = 0,
3517 },
3518 {
3519 .type = CRYPTO_ALG_TYPE_AEAD,
3520 .alg.aead = {
3521 .base = {
3522 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3523 .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3524 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3525 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3526 },
3527 .setkey = aead_authenc_setkey,
3528 .ivsize = DES3_EDE_BLOCK_SIZE,
3529 .maxauthsize = SHA256_DIGEST_SIZE,
3530 },
3531 .cipher_info = {
3532 .alg = CIPHER_ALG_3DES,
3533 .mode = CIPHER_MODE_CBC,
3534 },
3535 .auth_info = {
3536 .alg = HASH_ALG_SHA256,
3537 .mode = HASH_MODE_HMAC,
3538 },
3539 .auth_first = 0,
3540 },
3541 {
3542 .type = CRYPTO_ALG_TYPE_AEAD,
3543 .alg.aead = {
3544 .base = {
3545 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3546 .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3547 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3548 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3549 },
3550 .setkey = aead_authenc_setkey,
3551 .ivsize = DES3_EDE_BLOCK_SIZE,
3552 .maxauthsize = SHA384_DIGEST_SIZE,
3553 },
3554 .cipher_info = {
3555 .alg = CIPHER_ALG_3DES,
3556 .mode = CIPHER_MODE_CBC,
3557 },
3558 .auth_info = {
3559 .alg = HASH_ALG_SHA384,
3560 .mode = HASH_MODE_HMAC,
3561 },
3562 .auth_first = 0,
3563 },
3564 {
3565 .type = CRYPTO_ALG_TYPE_AEAD,
3566 .alg.aead = {
3567 .base = {
3568 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3569 .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3570 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3571 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3572 },
3573 .setkey = aead_authenc_setkey,
3574 .ivsize = DES3_EDE_BLOCK_SIZE,
3575 .maxauthsize = SHA512_DIGEST_SIZE,
3576 },
3577 .cipher_info = {
3578 .alg = CIPHER_ALG_3DES,
3579 .mode = CIPHER_MODE_CBC,
3580 },
3581 .auth_info = {
3582 .alg = HASH_ALG_SHA512,
3583 .mode = HASH_MODE_HMAC,
3584 },
3585 .auth_first = 0,
3586 },
3587
3588 /* ABLKCIPHER algorithms. */
3589 {
3590 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3591 .alg.crypto = {
3592 .cra_name = "ecb(arc4)",
3593 .cra_driver_name = "ecb-arc4-iproc",
3594 .cra_blocksize = ARC4_BLOCK_SIZE,
3595 .cra_ablkcipher = {
3596 .min_keysize = ARC4_MIN_KEY_SIZE,
3597 .max_keysize = ARC4_MAX_KEY_SIZE,
3598 .ivsize = 0,
3599 }
3600 },
3601 .cipher_info = {
3602 .alg = CIPHER_ALG_RC4,
3603 .mode = CIPHER_MODE_NONE,
3604 },
3605 .auth_info = {
3606 .alg = HASH_ALG_NONE,
3607 .mode = HASH_MODE_NONE,
3608 },
3609 },
3610 {
3611 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3612 .alg.crypto = {
3613 .cra_name = "ofb(des)",
3614 .cra_driver_name = "ofb-des-iproc",
3615 .cra_blocksize = DES_BLOCK_SIZE,
3616 .cra_ablkcipher = {
3617 .min_keysize = DES_KEY_SIZE,
3618 .max_keysize = DES_KEY_SIZE,
3619 .ivsize = DES_BLOCK_SIZE,
3620 }
3621 },
3622 .cipher_info = {
3623 .alg = CIPHER_ALG_DES,
3624 .mode = CIPHER_MODE_OFB,
3625 },
3626 .auth_info = {
3627 .alg = HASH_ALG_NONE,
3628 .mode = HASH_MODE_NONE,
3629 },
3630 },
3631 {
3632 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3633 .alg.crypto = {
3634 .cra_name = "cbc(des)",
3635 .cra_driver_name = "cbc-des-iproc",
3636 .cra_blocksize = DES_BLOCK_SIZE,
3637 .cra_ablkcipher = {
3638 .min_keysize = DES_KEY_SIZE,
3639 .max_keysize = DES_KEY_SIZE,
3640 .ivsize = DES_BLOCK_SIZE,
3641 }
3642 },
3643 .cipher_info = {
3644 .alg = CIPHER_ALG_DES,
3645 .mode = CIPHER_MODE_CBC,
3646 },
3647 .auth_info = {
3648 .alg = HASH_ALG_NONE,
3649 .mode = HASH_MODE_NONE,
3650 },
3651 },
3652 {
3653 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3654 .alg.crypto = {
3655 .cra_name = "ecb(des)",
3656 .cra_driver_name = "ecb-des-iproc",
3657 .cra_blocksize = DES_BLOCK_SIZE,
3658 .cra_ablkcipher = {
3659 .min_keysize = DES_KEY_SIZE,
3660 .max_keysize = DES_KEY_SIZE,
3661 .ivsize = 0,
3662 }
3663 },
3664 .cipher_info = {
3665 .alg = CIPHER_ALG_DES,
3666 .mode = CIPHER_MODE_ECB,
3667 },
3668 .auth_info = {
3669 .alg = HASH_ALG_NONE,
3670 .mode = HASH_MODE_NONE,
3671 },
3672 },
3673 {
3674 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3675 .alg.crypto = {
3676 .cra_name = "ofb(des3_ede)",
3677 .cra_driver_name = "ofb-des3-iproc",
3678 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3679 .cra_ablkcipher = {
3680 .min_keysize = DES3_EDE_KEY_SIZE,
3681 .max_keysize = DES3_EDE_KEY_SIZE,
3682 .ivsize = DES3_EDE_BLOCK_SIZE,
3683 }
3684 },
3685 .cipher_info = {
3686 .alg = CIPHER_ALG_3DES,
3687 .mode = CIPHER_MODE_OFB,
3688 },
3689 .auth_info = {
3690 .alg = HASH_ALG_NONE,
3691 .mode = HASH_MODE_NONE,
3692 },
3693 },
3694 {
3695 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3696 .alg.crypto = {
3697 .cra_name = "cbc(des3_ede)",
3698 .cra_driver_name = "cbc-des3-iproc",
3699 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3700 .cra_ablkcipher = {
3701 .min_keysize = DES3_EDE_KEY_SIZE,
3702 .max_keysize = DES3_EDE_KEY_SIZE,
3703 .ivsize = DES3_EDE_BLOCK_SIZE,
3704 }
3705 },
3706 .cipher_info = {
3707 .alg = CIPHER_ALG_3DES,
3708 .mode = CIPHER_MODE_CBC,
3709 },
3710 .auth_info = {
3711 .alg = HASH_ALG_NONE,
3712 .mode = HASH_MODE_NONE,
3713 },
3714 },
3715 {
3716 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3717 .alg.crypto = {
3718 .cra_name = "ecb(des3_ede)",
3719 .cra_driver_name = "ecb-des3-iproc",
3720 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3721 .cra_ablkcipher = {
3722 .min_keysize = DES3_EDE_KEY_SIZE,
3723 .max_keysize = DES3_EDE_KEY_SIZE,
3724 .ivsize = 0,
3725 }
3726 },
3727 .cipher_info = {
3728 .alg = CIPHER_ALG_3DES,
3729 .mode = CIPHER_MODE_ECB,
3730 },
3731 .auth_info = {
3732 .alg = HASH_ALG_NONE,
3733 .mode = HASH_MODE_NONE,
3734 },
3735 },
3736 {
3737 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3738 .alg.crypto = {
3739 .cra_name = "ofb(aes)",
3740 .cra_driver_name = "ofb-aes-iproc",
3741 .cra_blocksize = AES_BLOCK_SIZE,
3742 .cra_ablkcipher = {
3743 .min_keysize = AES_MIN_KEY_SIZE,
3744 .max_keysize = AES_MAX_KEY_SIZE,
3745 .ivsize = AES_BLOCK_SIZE,
3746 }
3747 },
3748 .cipher_info = {
3749 .alg = CIPHER_ALG_AES,
3750 .mode = CIPHER_MODE_OFB,
3751 },
3752 .auth_info = {
3753 .alg = HASH_ALG_NONE,
3754 .mode = HASH_MODE_NONE,
3755 },
3756 },
3757 {
3758 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3759 .alg.crypto = {
3760 .cra_name = "cbc(aes)",
3761 .cra_driver_name = "cbc-aes-iproc",
3762 .cra_blocksize = AES_BLOCK_SIZE,
3763 .cra_ablkcipher = {
3764 .min_keysize = AES_MIN_KEY_SIZE,
3765 .max_keysize = AES_MAX_KEY_SIZE,
3766 .ivsize = AES_BLOCK_SIZE,
3767 }
3768 },
3769 .cipher_info = {
3770 .alg = CIPHER_ALG_AES,
3771 .mode = CIPHER_MODE_CBC,
3772 },
3773 .auth_info = {
3774 .alg = HASH_ALG_NONE,
3775 .mode = HASH_MODE_NONE,
3776 },
3777 },
3778 {
3779 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3780 .alg.crypto = {
3781 .cra_name = "ecb(aes)",
3782 .cra_driver_name = "ecb-aes-iproc",
3783 .cra_blocksize = AES_BLOCK_SIZE,
3784 .cra_ablkcipher = {
3785 .min_keysize = AES_MIN_KEY_SIZE,
3786 .max_keysize = AES_MAX_KEY_SIZE,
3787 .ivsize = 0,
3788 }
3789 },
3790 .cipher_info = {
3791 .alg = CIPHER_ALG_AES,
3792 .mode = CIPHER_MODE_ECB,
3793 },
3794 .auth_info = {
3795 .alg = HASH_ALG_NONE,
3796 .mode = HASH_MODE_NONE,
3797 },
3798 },
3799 {
3800 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3801 .alg.crypto = {
3802 .cra_name = "ctr(aes)",
3803 .cra_driver_name = "ctr-aes-iproc",
3804 .cra_blocksize = AES_BLOCK_SIZE,
3805 .cra_ablkcipher = {
3806 .min_keysize = AES_MIN_KEY_SIZE,
3807 .max_keysize = AES_MAX_KEY_SIZE,
3808 .ivsize = AES_BLOCK_SIZE,
3809 }
3810 },
3811 .cipher_info = {
3812 .alg = CIPHER_ALG_AES,
3813 .mode = CIPHER_MODE_CTR,
3814 },
3815 .auth_info = {
3816 .alg = HASH_ALG_NONE,
3817 .mode = HASH_MODE_NONE,
3818 },
3819 },
3820 {
3821 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3822 .alg.crypto = {
3823 .cra_name = "xts(aes)",
3824 .cra_driver_name = "xts-aes-iproc",
3825 .cra_blocksize = AES_BLOCK_SIZE,
3826 .cra_ablkcipher = {
3827 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3828 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3829 .ivsize = AES_BLOCK_SIZE,
3830 }
3831 },
3832 .cipher_info = {
3833 .alg = CIPHER_ALG_AES,
3834 .mode = CIPHER_MODE_XTS,
3835 },
3836 .auth_info = {
3837 .alg = HASH_ALG_NONE,
3838 .mode = HASH_MODE_NONE,
3839 },
3840 },
3841
3842 /* AHASH algorithms. */
3843 {
3844 .type = CRYPTO_ALG_TYPE_AHASH,
3845 .alg.hash = {
3846 .halg.digestsize = MD5_DIGEST_SIZE,
3847 .halg.base = {
3848 .cra_name = "md5",
3849 .cra_driver_name = "md5-iproc",
3850 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3851 .cra_flags = CRYPTO_ALG_ASYNC,
3852 }
3853 },
3854 .cipher_info = {
3855 .alg = CIPHER_ALG_NONE,
3856 .mode = CIPHER_MODE_NONE,
3857 },
3858 .auth_info = {
3859 .alg = HASH_ALG_MD5,
3860 .mode = HASH_MODE_HASH,
3861 },
3862 },
3863 {
3864 .type = CRYPTO_ALG_TYPE_AHASH,
3865 .alg.hash = {
3866 .halg.digestsize = MD5_DIGEST_SIZE,
3867 .halg.base = {
3868 .cra_name = "hmac(md5)",
3869 .cra_driver_name = "hmac-md5-iproc",
3870 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3871 }
3872 },
3873 .cipher_info = {
3874 .alg = CIPHER_ALG_NONE,
3875 .mode = CIPHER_MODE_NONE,
3876 },
3877 .auth_info = {
3878 .alg = HASH_ALG_MD5,
3879 .mode = HASH_MODE_HMAC,
3880 },
3881 },
3882 {.type = CRYPTO_ALG_TYPE_AHASH,
3883 .alg.hash = {
3884 .halg.digestsize = SHA1_DIGEST_SIZE,
3885 .halg.base = {
3886 .cra_name = "sha1",
3887 .cra_driver_name = "sha1-iproc",
3888 .cra_blocksize = SHA1_BLOCK_SIZE,
3889 }
3890 },
3891 .cipher_info = {
3892 .alg = CIPHER_ALG_NONE,
3893 .mode = CIPHER_MODE_NONE,
3894 },
3895 .auth_info = {
3896 .alg = HASH_ALG_SHA1,
3897 .mode = HASH_MODE_HASH,
3898 },
3899 },
3900 {.type = CRYPTO_ALG_TYPE_AHASH,
3901 .alg.hash = {
3902 .halg.digestsize = SHA1_DIGEST_SIZE,
3903 .halg.base = {
3904 .cra_name = "hmac(sha1)",
3905 .cra_driver_name = "hmac-sha1-iproc",
3906 .cra_blocksize = SHA1_BLOCK_SIZE,
3907 }
3908 },
3909 .cipher_info = {
3910 .alg = CIPHER_ALG_NONE,
3911 .mode = CIPHER_MODE_NONE,
3912 },
3913 .auth_info = {
3914 .alg = HASH_ALG_SHA1,
3915 .mode = HASH_MODE_HMAC,
3916 },
3917 },
3918 {.type = CRYPTO_ALG_TYPE_AHASH,
3919 .alg.hash = {
3920 .halg.digestsize = SHA224_DIGEST_SIZE,
3921 .halg.base = {
3922 .cra_name = "sha224",
3923 .cra_driver_name = "sha224-iproc",
3924 .cra_blocksize = SHA224_BLOCK_SIZE,
3925 }
3926 },
3927 .cipher_info = {
3928 .alg = CIPHER_ALG_NONE,
3929 .mode = CIPHER_MODE_NONE,
3930 },
3931 .auth_info = {
3932 .alg = HASH_ALG_SHA224,
3933 .mode = HASH_MODE_HASH,
3934 },
3935 },
3936 {.type = CRYPTO_ALG_TYPE_AHASH,
3937 .alg.hash = {
3938 .halg.digestsize = SHA224_DIGEST_SIZE,
3939 .halg.base = {
3940 .cra_name = "hmac(sha224)",
3941 .cra_driver_name = "hmac-sha224-iproc",
3942 .cra_blocksize = SHA224_BLOCK_SIZE,
3943 }
3944 },
3945 .cipher_info = {
3946 .alg = CIPHER_ALG_NONE,
3947 .mode = CIPHER_MODE_NONE,
3948 },
3949 .auth_info = {
3950 .alg = HASH_ALG_SHA224,
3951 .mode = HASH_MODE_HMAC,
3952 },
3953 },
3954 {.type = CRYPTO_ALG_TYPE_AHASH,
3955 .alg.hash = {
3956 .halg.digestsize = SHA256_DIGEST_SIZE,
3957 .halg.base = {
3958 .cra_name = "sha256",
3959 .cra_driver_name = "sha256-iproc",
3960 .cra_blocksize = SHA256_BLOCK_SIZE,
3961 }
3962 },
3963 .cipher_info = {
3964 .alg = CIPHER_ALG_NONE,
3965 .mode = CIPHER_MODE_NONE,
3966 },
3967 .auth_info = {
3968 .alg = HASH_ALG_SHA256,
3969 .mode = HASH_MODE_HASH,
3970 },
3971 },
3972 {.type = CRYPTO_ALG_TYPE_AHASH,
3973 .alg.hash = {
3974 .halg.digestsize = SHA256_DIGEST_SIZE,
3975 .halg.base = {
3976 .cra_name = "hmac(sha256)",
3977 .cra_driver_name = "hmac-sha256-iproc",
3978 .cra_blocksize = SHA256_BLOCK_SIZE,
3979 }
3980 },
3981 .cipher_info = {
3982 .alg = CIPHER_ALG_NONE,
3983 .mode = CIPHER_MODE_NONE,
3984 },
3985 .auth_info = {
3986 .alg = HASH_ALG_SHA256,
3987 .mode = HASH_MODE_HMAC,
3988 },
3989 },
3990 {
3991 .type = CRYPTO_ALG_TYPE_AHASH,
3992 .alg.hash = {
3993 .halg.digestsize = SHA384_DIGEST_SIZE,
3994 .halg.base = {
3995 .cra_name = "sha384",
3996 .cra_driver_name = "sha384-iproc",
3997 .cra_blocksize = SHA384_BLOCK_SIZE,
3998 }
3999 },
4000 .cipher_info = {
4001 .alg = CIPHER_ALG_NONE,
4002 .mode = CIPHER_MODE_NONE,
4003 },
4004 .auth_info = {
4005 .alg = HASH_ALG_SHA384,
4006 .mode = HASH_MODE_HASH,
4007 },
4008 },
4009 {
4010 .type = CRYPTO_ALG_TYPE_AHASH,
4011 .alg.hash = {
4012 .halg.digestsize = SHA384_DIGEST_SIZE,
4013 .halg.base = {
4014 .cra_name = "hmac(sha384)",
4015 .cra_driver_name = "hmac-sha384-iproc",
4016 .cra_blocksize = SHA384_BLOCK_SIZE,
4017 }
4018 },
4019 .cipher_info = {
4020 .alg = CIPHER_ALG_NONE,
4021 .mode = CIPHER_MODE_NONE,
4022 },
4023 .auth_info = {
4024 .alg = HASH_ALG_SHA384,
4025 .mode = HASH_MODE_HMAC,
4026 },
4027 },
4028 {
4029 .type = CRYPTO_ALG_TYPE_AHASH,
4030 .alg.hash = {
4031 .halg.digestsize = SHA512_DIGEST_SIZE,
4032 .halg.base = {
4033 .cra_name = "sha512",
4034 .cra_driver_name = "sha512-iproc",
4035 .cra_blocksize = SHA512_BLOCK_SIZE,
4036 }
4037 },
4038 .cipher_info = {
4039 .alg = CIPHER_ALG_NONE,
4040 .mode = CIPHER_MODE_NONE,
4041 },
4042 .auth_info = {
4043 .alg = HASH_ALG_SHA512,
4044 .mode = HASH_MODE_HASH,
4045 },
4046 },
4047 {
4048 .type = CRYPTO_ALG_TYPE_AHASH,
4049 .alg.hash = {
4050 .halg.digestsize = SHA512_DIGEST_SIZE,
4051 .halg.base = {
4052 .cra_name = "hmac(sha512)",
4053 .cra_driver_name = "hmac-sha512-iproc",
4054 .cra_blocksize = SHA512_BLOCK_SIZE,
4055 }
4056 },
4057 .cipher_info = {
4058 .alg = CIPHER_ALG_NONE,
4059 .mode = CIPHER_MODE_NONE,
4060 },
4061 .auth_info = {
4062 .alg = HASH_ALG_SHA512,
4063 .mode = HASH_MODE_HMAC,
4064 },
4065 },
4066 {
4067 .type = CRYPTO_ALG_TYPE_AHASH,
4068 .alg.hash = {
4069 .halg.digestsize = SHA3_224_DIGEST_SIZE,
4070 .halg.base = {
4071 .cra_name = "sha3-224",
4072 .cra_driver_name = "sha3-224-iproc",
4073 .cra_blocksize = SHA3_224_BLOCK_SIZE,
4074 }
4075 },
4076 .cipher_info = {
4077 .alg = CIPHER_ALG_NONE,
4078 .mode = CIPHER_MODE_NONE,
4079 },
4080 .auth_info = {
4081 .alg = HASH_ALG_SHA3_224,
4082 .mode = HASH_MODE_HASH,
4083 },
4084 },
4085 {
4086 .type = CRYPTO_ALG_TYPE_AHASH,
4087 .alg.hash = {
4088 .halg.digestsize = SHA3_224_DIGEST_SIZE,
4089 .halg.base = {
4090 .cra_name = "hmac(sha3-224)",
4091 .cra_driver_name = "hmac-sha3-224-iproc",
4092 .cra_blocksize = SHA3_224_BLOCK_SIZE,
4093 }
4094 },
4095 .cipher_info = {
4096 .alg = CIPHER_ALG_NONE,
4097 .mode = CIPHER_MODE_NONE,
4098 },
4099 .auth_info = {
4100 .alg = HASH_ALG_SHA3_224,
4101 .mode = HASH_MODE_HMAC
4102 },
4103 },
4104 {
4105 .type = CRYPTO_ALG_TYPE_AHASH,
4106 .alg.hash = {
4107 .halg.digestsize = SHA3_256_DIGEST_SIZE,
4108 .halg.base = {
4109 .cra_name = "sha3-256",
4110 .cra_driver_name = "sha3-256-iproc",
4111 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4112 }
4113 },
4114 .cipher_info = {
4115 .alg = CIPHER_ALG_NONE,
4116 .mode = CIPHER_MODE_NONE,
4117 },
4118 .auth_info = {
4119 .alg = HASH_ALG_SHA3_256,
4120 .mode = HASH_MODE_HASH,
4121 },
4122 },
4123 {
4124 .type = CRYPTO_ALG_TYPE_AHASH,
4125 .alg.hash = {
4126 .halg.digestsize = SHA3_256_DIGEST_SIZE,
4127 .halg.base = {
4128 .cra_name = "hmac(sha3-256)",
4129 .cra_driver_name = "hmac-sha3-256-iproc",
4130 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4131 }
4132 },
4133 .cipher_info = {
4134 .alg = CIPHER_ALG_NONE,
4135 .mode = CIPHER_MODE_NONE,
4136 },
4137 .auth_info = {
4138 .alg = HASH_ALG_SHA3_256,
4139 .mode = HASH_MODE_HMAC,
4140 },
4141 },
4142 {
4143 .type = CRYPTO_ALG_TYPE_AHASH,
4144 .alg.hash = {
4145 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4146 .halg.base = {
4147 .cra_name = "sha3-384",
4148 .cra_driver_name = "sha3-384-iproc",
4149 .cra_blocksize = SHA3_384_BLOCK_SIZE,
4150 }
4151 },
4152 .cipher_info = {
4153 .alg = CIPHER_ALG_NONE,
4154 .mode = CIPHER_MODE_NONE,
4155 },
4156 .auth_info = {
4157 .alg = HASH_ALG_SHA3_384,
4158 .mode = HASH_MODE_HASH,
4159 },
4160 },
4161 {
4162 .type = CRYPTO_ALG_TYPE_AHASH,
4163 .alg.hash = {
4164 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4165 .halg.base = {
4166 .cra_name = "hmac(sha3-384)",
4167 .cra_driver_name = "hmac-sha3-384-iproc",
4168 .cra_blocksize = SHA3_384_BLOCK_SIZE,
4169 }
4170 },
4171 .cipher_info = {
4172 .alg = CIPHER_ALG_NONE,
4173 .mode = CIPHER_MODE_NONE,
4174 },
4175 .auth_info = {
4176 .alg = HASH_ALG_SHA3_384,
4177 .mode = HASH_MODE_HMAC,
4178 },
4179 },
4180 {
4181 .type = CRYPTO_ALG_TYPE_AHASH,
4182 .alg.hash = {
4183 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4184 .halg.base = {
4185 .cra_name = "sha3-512",
4186 .cra_driver_name = "sha3-512-iproc",
4187 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4188 }
4189 },
4190 .cipher_info = {
4191 .alg = CIPHER_ALG_NONE,
4192 .mode = CIPHER_MODE_NONE,
4193 },
4194 .auth_info = {
4195 .alg = HASH_ALG_SHA3_512,
4196 .mode = HASH_MODE_HASH,
4197 },
4198 },
4199 {
4200 .type = CRYPTO_ALG_TYPE_AHASH,
4201 .alg.hash = {
4202 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4203 .halg.base = {
4204 .cra_name = "hmac(sha3-512)",
4205 .cra_driver_name = "hmac-sha3-512-iproc",
4206 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4207 }
4208 },
4209 .cipher_info = {
4210 .alg = CIPHER_ALG_NONE,
4211 .mode = CIPHER_MODE_NONE,
4212 },
4213 .auth_info = {
4214 .alg = HASH_ALG_SHA3_512,
4215 .mode = HASH_MODE_HMAC,
4216 },
4217 },
4218 {
4219 .type = CRYPTO_ALG_TYPE_AHASH,
4220 .alg.hash = {
4221 .halg.digestsize = AES_BLOCK_SIZE,
4222 .halg.base = {
4223 .cra_name = "xcbc(aes)",
4224 .cra_driver_name = "xcbc-aes-iproc",
4225 .cra_blocksize = AES_BLOCK_SIZE,
4226 }
4227 },
4228 .cipher_info = {
4229 .alg = CIPHER_ALG_NONE,
4230 .mode = CIPHER_MODE_NONE,
4231 },
4232 .auth_info = {
4233 .alg = HASH_ALG_AES,
4234 .mode = HASH_MODE_XCBC,
4235 },
4236 },
4237 {
4238 .type = CRYPTO_ALG_TYPE_AHASH,
4239 .alg.hash = {
4240 .halg.digestsize = AES_BLOCK_SIZE,
4241 .halg.base = {
4242 .cra_name = "cmac(aes)",
4243 .cra_driver_name = "cmac-aes-iproc",
4244 .cra_blocksize = AES_BLOCK_SIZE,
4245 }
4246 },
4247 .cipher_info = {
4248 .alg = CIPHER_ALG_NONE,
4249 .mode = CIPHER_MODE_NONE,
4250 },
4251 .auth_info = {
4252 .alg = HASH_ALG_AES,
4253 .mode = HASH_MODE_CMAC,
4254 },
4255 },
4256 };
4257
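/*
 * generic_cra_init() - Common transform init: record the algorithm's cipher
 * and auth parameters and the maximum payload the SPU can handle per request.
 */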
4258 static int generic_cra_init(struct crypto_tfm *tfm,
4259 struct iproc_alg_s *cipher_alg)
4260 {
4261 struct spu_hw *spu = &iproc_priv.spu;
4262 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4263 unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
4264
4265 flow_log("%s()\n", __func__);
4266
4267 ctx->alg = cipher_alg;
4268 ctx->cipher = cipher_alg->cipher_info;
4269 ctx->auth = cipher_alg->auth_info;
4270 ctx->auth_first = cipher_alg->auth_first;
4271 ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
4272 ctx->cipher.mode,
4273 blocksize);
4274 ctx->fallback_cipher = NULL;
4275
4276 ctx->enckeylen = 0;
4277 ctx->authkeylen = 0;
4278
4279 atomic_inc(&iproc_priv.stream_count);
4280 atomic_inc(&iproc_priv.session_count);
4281
4282 return 0;
4283 }
4284
4285 static int ablkcipher_cra_init(struct crypto_tfm *tfm)
4286 {
4287 struct crypto_alg *alg = tfm->__crt_alg;
4288 struct iproc_alg_s *cipher_alg;
4289
4290 flow_log("%s()\n", __func__);
4291
4292 tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s);
4293
4294 cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto);
4295 return generic_cra_init(tfm, cipher_alg);
4296 }
4297
4298 static int ahash_cra_init(struct crypto_tfm *tfm)
4299 {
4300 int err;
4301 struct crypto_alg *alg = tfm->__crt_alg;
4302 struct iproc_alg_s *cipher_alg;
4303
4304 cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
4305 alg.hash);
4306
4307 err = generic_cra_init(tfm, cipher_alg);
4308 flow_log("%s()\n", __func__);
4309
4310 /*
4311  * The export state size has to be < 512 bytes, so do not include the
4312  * message buffers in the state size.
4313  */
4314 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4315 sizeof(struct iproc_reqctx_s));
4316
4317 return err;
4318 }
4319
4320 static int aead_cra_init(struct crypto_aead *aead)
4321 {
4322 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4323 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4324 struct crypto_alg *alg = tfm->__crt_alg;
4325 struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
4326 struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
4327 alg.aead);
4328
4329 int err = generic_cra_init(tfm, cipher_alg);
4330
4331 flow_log("%s()\n", __func__);
4332
4333 crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
4334 ctx->is_esp = false;
4335 ctx->salt_len = 0;
4336 ctx->salt_offset = 0;
4337
4338 /* Random first IV */
4339 get_random_bytes(ctx->iv, MAX_IV_SIZE);
4340 flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE);
4341
4342 if (!err) {
4343 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
4344 flow_log("%s() creating fallback cipher\n", __func__);
4345
4346 ctx->fallback_cipher =
4347 crypto_alloc_aead(alg->cra_name, 0,
4348 CRYPTO_ALG_ASYNC |
4349 CRYPTO_ALG_NEED_FALLBACK);
4350 if (IS_ERR(ctx->fallback_cipher)) {
4351 pr_err("%s() Error: failed to allocate fallback for %s\n",
4352 __func__, alg->cra_name);
4353 return PTR_ERR(ctx->fallback_cipher);
4354 }
4355 }
4356 }
4357
4358 return err;
4359 }
4360
4361 static void generic_cra_exit(struct crypto_tfm *tfm)
4362 {
4363 atomic_dec(&iproc_priv.session_count);
4364 }
4365
4366 static void aead_cra_exit(struct crypto_aead *aead)
4367 {
4368 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4369 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4370
4371 generic_cra_exit(tfm);
4372
4373 if (ctx->fallback_cipher) {
4374 crypto_free_aead(ctx->fallback_cipher);
4375 ctx->fallback_cipher = NULL;
4376 }
4377 }
4378
4379 /**
4380  * spu_functions_register() - Specify hardware-specific SPU functions based
4381  * on the SPU type read from the device tree.
4382  * @dev: device structure
4383  * @spu_type: SPU hardware generation
4384  * @spu_subtype: SPU hardware version
4385  */
4386 static void spu_functions_register(struct device *dev,
4387 enum spu_spu_type spu_type,
4388 enum spu_spu_subtype spu_subtype)
4389 {
4390 struct spu_hw *spu = &iproc_priv.spu;
4391
4392 if (spu_type == SPU_TYPE_SPUM) {
4393 dev_dbg(dev, "Registering SPUM functions");
4394 spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
4395 spu->spu_payload_length = spum_payload_length;
4396 spu->spu_response_hdr_len = spum_response_hdr_len;
4397 spu->spu_hash_pad_len = spum_hash_pad_len;
4398 spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
4399 spu->spu_assoc_resp_len = spum_assoc_resp_len;
4400 spu->spu_aead_ivlen = spum_aead_ivlen;
4401 spu->spu_hash_type = spum_hash_type;
4402 spu->spu_digest_size = spum_digest_size;
4403 spu->spu_create_request = spum_create_request;
4404 spu->spu_cipher_req_init = spum_cipher_req_init;
4405 spu->spu_cipher_req_finish = spum_cipher_req_finish;
4406 spu->spu_request_pad = spum_request_pad;
4407 spu->spu_tx_status_len = spum_tx_status_len;
4408 spu->spu_rx_status_len = spum_rx_status_len;
4409 spu->spu_status_process = spum_status_process;
4410 spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
4411 spu->spu_ccm_update_iv = spum_ccm_update_iv;
4412 spu->spu_wordalign_padlen = spum_wordalign_padlen;
4413 if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
4414 spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
4415 else
4416 spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
4417 } else {
4418 dev_dbg(dev, "Registering SPU2 functions");
4419 spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
4420 spu->spu_ctx_max_payload = spu2_ctx_max_payload;
4421 spu->spu_payload_length = spu2_payload_length;
4422 spu->spu_response_hdr_len = spu2_response_hdr_len;
4423 spu->spu_hash_pad_len = spu2_hash_pad_len;
4424 spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
4425 spu->spu_assoc_resp_len = spu2_assoc_resp_len;
4426 spu->spu_aead_ivlen = spu2_aead_ivlen;
4427 spu->spu_hash_type = spu2_hash_type;
4428 spu->spu_digest_size = spu2_digest_size;
4429 spu->spu_create_request = spu2_create_request;
4430 spu->spu_cipher_req_init = spu2_cipher_req_init;
4431 spu->spu_cipher_req_finish = spu2_cipher_req_finish;
4432 spu->spu_request_pad = spu2_request_pad;
4433 spu->spu_tx_status_len = spu2_tx_status_len;
4434 spu->spu_rx_status_len = spu2_rx_status_len;
4435 spu->spu_status_process = spu2_status_process;
4436 spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
4437 spu->spu_ccm_update_iv = spu2_ccm_update_iv;
4438 spu->spu_wordalign_padlen = spu2_wordalign_padlen;
4439 }
4440 }
4441
4442 /**
4443  * spu_mb_init() - Initialize the mailbox client and request ownership of a
4444  * mailbox channel for each SPU. TX-done responses are ignored; the driver
4445  * checks responses in the receive callback instead.
4446  * @dev: SPU driver device structure
4447  *
4448  * Return: 0 if successful, < 0 otherwise
4449  */
4450 static int spu_mb_init(struct device *dev)
4451 {
4452 struct mbox_client *mcl = &iproc_priv.mcl;
4453 int err, i;
4454
4455 iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
4456 sizeof(struct mbox_chan *), GFP_KERNEL);
4457 if (!iproc_priv.mbox)
4458 return -ENOMEM;
4459
4460 mcl->dev = dev;
4461 mcl->tx_block = false;
4462 mcl->tx_tout = 0;
4463 mcl->knows_txdone = true;
4464 mcl->rx_callback = spu_rx_callback;
4465 mcl->tx_done = NULL;
4466
4467 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4468 iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
4469 if (IS_ERR(iproc_priv.mbox[i])) {
4470 err = (int)PTR_ERR(iproc_priv.mbox[i]);
4471 dev_err(dev,
4472 "Mbox channel %d request failed with err %d",
4473 i, err);
4474 iproc_priv.mbox[i] = NULL;
4475 goto free_channels;
4476 }
4477 }
4478
4479 return 0;
4480 free_channels:
4481 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4482 if (iproc_priv.mbox[i])
4483 mbox_free_channel(iproc_priv.mbox[i]);
4484 }
4485
4486 return err;
4487 }
4488
4489 static void spu_mb_release(struct platform_device *pdev)
4490 {
4491 int i;
4492
4493 for (i = 0; i < iproc_priv.spu.num_chan; i++)
4494 mbox_free_channel(iproc_priv.mbox[i]);
4495 }
4496
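/* spu_counters_init() - Zero all debug and statistics counters. */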
4497 static void spu_counters_init(void)
4498 {
4499 int i;
4500 int j;
4501
4502 atomic_set(&iproc_priv.session_count, 0);
4503 atomic_set(&iproc_priv.stream_count, 0);
4504 atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
4505 atomic64_set(&iproc_priv.bytes_in, 0);
4506 atomic64_set(&iproc_priv.bytes_out, 0);
4507 for (i = 0; i < SPU_OP_NUM; i++) {
4508 atomic_set(&iproc_priv.op_counts[i], 0);
4509 atomic_set(&iproc_priv.setkey_cnt[i], 0);
4510 }
4511 for (i = 0; i < CIPHER_ALG_LAST; i++)
4512 for (j = 0; j < CIPHER_MODE_LAST; j++)
4513 atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
4514
4515 for (i = 0; i < HASH_ALG_LAST; i++) {
4516 atomic_set(&iproc_priv.hash_cnt[i], 0);
4517 atomic_set(&iproc_priv.hmac_cnt[i], 0);
4518 }
4519 for (i = 0; i < AEAD_TYPE_LAST; i++)
4520 atomic_set(&iproc_priv.aead_cnt[i], 0);
4521
4522 atomic_set(&iproc_priv.mb_no_spc, 0);
4523 atomic_set(&iproc_priv.mb_send_fail, 0);
4524 atomic_set(&iproc_priv.bad_icv, 0);
4525 }
4526
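/*
 * spu_register_ablkcipher() - Fill in the common fields of an ablkcipher
 * algorithm and register it with the kernel crypto API.
 */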
4527 static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
4528 {
4529 struct spu_hw *spu = &iproc_priv.spu;
4530 struct crypto_alg *crypto = &driver_alg->alg.crypto;
4531 int err;
4532
4533 /* SPU2 does not support RC4 */
4534 if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
4535 (spu->spu_type == SPU_TYPE_SPU2))
4536 return 0;
4537
4538 crypto->cra_module = THIS_MODULE;
4539 crypto->cra_priority = cipher_pri;
4540 crypto->cra_alignmask = 0;
4541 crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
4542
4543 crypto->cra_init = ablkcipher_cra_init;
4544 crypto->cra_exit = generic_cra_exit;
4545 crypto->cra_type = &crypto_ablkcipher_type;
4546 crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4547 CRYPTO_ALG_KERN_DRIVER_ONLY;
4548
4549 crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
4550 crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
4551 crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;
4552
4553 err = crypto_register_alg(crypto);
4554
4555 if (err == 0)
4556 driver_alg->registered = true;
4557 pr_debug(" registered ablkcipher %s\n", crypto->cra_driver_name);
4558 return err;
4559 }
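
/*
 * Sketch of how a kernel consumer would reach one of these offloaded ciphers
 * once registered; "cbc(aes)" is only an example, the real names come from
 * the driver_algs table. Because cipher_pri (150) is higher than the priority
 * of the generic software implementations (typically 100), the crypto API
 * resolves the name to this driver when the hardware is present.
 */
static struct crypto_skcipher *example_get_cipher(void)
{
	return crypto_alloc_skcipher("cbc(aes)", 0, 0);
}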
4560
4561 static int spu_register_ahash(struct iproc_alg_s *driver_alg)
4562 {
4563 struct spu_hw *spu = &iproc_priv.spu;
4564 struct ahash_alg *hash = &driver_alg->alg.hash;
4565 int err;
4566
4567 /* SPU-M supports AES hashing only in XCBC mode; skip other AES hash modes */
4568 if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4569 (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
4570 (spu->spu_type == SPU_TYPE_SPUM))
4571 return 0;
4572
4573 /* SHA3 variants are only supported by the SPU2 V2 subtype */
4574 if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
4575 (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
4576 return 0;
4577
4578 hash->halg.base.cra_module = THIS_MODULE;
4579 hash->halg.base.cra_priority = hash_pri;
4580 hash->halg.base.cra_alignmask = 0;
4581 hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4582 hash->halg.base.cra_init = ahash_cra_init;
4583 hash->halg.base.cra_exit = generic_cra_exit;
4584 hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4585 hash->halg.statesize = sizeof(struct spu_hash_export_s);
4586
4587 if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
4588 hash->init = ahash_init;
4589 hash->update = ahash_update;
4590 hash->final = ahash_final;
4591 hash->finup = ahash_finup;
4592 hash->digest = ahash_digest;
4593 if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4594 ((driver_alg->auth_info.mode == HASH_MODE_XCBC) ||
4595 (driver_alg->auth_info.mode == HASH_MODE_CMAC))) {
4596 hash->setkey = ahash_setkey;
4597 }
4598 } else {
4599 hash->setkey = ahash_hmac_setkey;
4600 hash->init = ahash_hmac_init;
4601 hash->update = ahash_hmac_update;
4602 hash->final = ahash_hmac_final;
4603 hash->finup = ahash_hmac_finup;
4604 hash->digest = ahash_hmac_digest;
4605 }
4606 hash->export = ahash_export;
4607 hash->import = ahash_import;
4608
4609 err = crypto_register_ahash(hash);
4610
4611 if (err == 0)
4612 driver_alg->registered = true;
4613 pr_debug(" registered ahash %s\n",
4614 hash->halg.base.cra_driver_name);
4615 return err;
4616 }
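
/*
 * Sketch, under stated assumptions, of driving a registered ahash through the
 * one-shot digest path: example_sha256() is a hypothetical helper, buf must
 * be in DMA-able memory (e.g. kmalloc'd, not on the stack), and the caller
 * may sleep. crypto_wait_req() parks the caller until the asynchronous SPU
 * completion callback fires.
 */
static int example_sha256(const u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int rc;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* An async driver returns -EINPROGRESS here; wait for completion */
	rc = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return rc;
}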
4617
4618 static int spu_register_aead(struct iproc_alg_s *driver_alg)
4619 {
4620 struct aead_alg *aead = &driver_alg->alg.aead;
4621 int err;
4622
4623 aead->base.cra_module = THIS_MODULE;
4624 aead->base.cra_priority = aead_pri;
4625 aead->base.cra_alignmask = 0;
4626 aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4627
4628 aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
4629
4630 aead->setauthsize = aead_setauthsize;
4631 aead->encrypt = aead_encrypt;
4632 aead->decrypt = aead_decrypt;
4633 aead->init = aead_cra_init;
4634 aead->exit = aead_cra_exit;
4635
4636 err = crypto_register_aead(aead);
4637
4638 if (err == 0)
4639 driver_alg->registered = true;
4640 pr_debug(" registered aead %s\n", aead->base.cra_driver_name);
4641 return err;
4642 }
4643
4644 /* Register all algorithms in driver_algs with the kernel crypto API */
4645 static int spu_algs_register(struct device *dev)
4646 {
4647 int i, j;
4648 int err;
4649
4650 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4651 switch (driver_algs[i].type) {
4652 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4653 err = spu_register_ablkcipher(&driver_algs[i]);
4654 break;
4655 case CRYPTO_ALG_TYPE_AHASH:
4656 err = spu_register_ahash(&driver_algs[i]);
4657 break;
4658 case CRYPTO_ALG_TYPE_AEAD:
4659 err = spu_register_aead(&driver_algs[i]);
4660 break;
4661 default:
4662 dev_err(dev,
4663 "iproc-crypto: unknown alg type: %d\n",
4664 driver_algs[i].type);
4665 err = -EINVAL;
4666 }
4667
4668 if (err) {
4669 dev_err(dev, "alg registration failed with error %d\n",
4670 err);
4671 goto err_algs;
4672 }
4673 }
4674
4675 return 0;
4676
4677 err_algs:
4678 for (j = 0; j < i; j++) {
4679 /* Skip any algorithm that did not register successfully */
4680 if (!driver_algs[j].registered)
4681 continue;
4682 switch (driver_algs[j].type) {
4683 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4684 crypto_unregister_alg(&driver_algs[j].alg.crypto);
4685 driver_algs[j].registered = false;
4686 break;
4687 case CRYPTO_ALG_TYPE_AHASH:
4688 crypto_unregister_ahash(&driver_algs[j].alg.hash);
4689 driver_algs[j].registered = false;
4690 break;
4691 case CRYPTO_ALG_TYPE_AEAD:
4692 crypto_unregister_aead(&driver_algs[j].alg.aead);
4693 driver_algs[j].registered = false;
4694 break;
4695 }
4696 }
4697 return err;
4698 }
4699
4700 /* SPU hardware type/subtype associated with each device tree compatible */
4701
4702 static struct spu_type_subtype spum_ns2_types = {
4703 SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
4704 };
4705
4706 static struct spu_type_subtype spum_nsp_types = {
4707 SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
4708 };
4709
4710 static struct spu_type_subtype spu2_types = {
4711 SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
4712 };
4713
4714 static struct spu_type_subtype spu2_v2_types = {
4715 SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
4716 };
4717
4718 static const struct of_device_id bcm_spu_dt_ids[] = {
4719 {
4720 .compatible = "brcm,spum-crypto",
4721 .data = &spum_ns2_types,
4722 },
4723 {
4724 .compatible = "brcm,spum-nsp-crypto",
4725 .data = &spum_nsp_types,
4726 },
4727 {
4728 .compatible = "brcm,spu2-crypto",
4729 .data = &spu2_types,
4730 },
4731 {
4732 .compatible = "brcm,spu2-v2-crypto",
4733 .data = &spu2_v2_types,
4734 },
4735 { }
4736 };
4737
4738 MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);
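
/*
 * Hypothetical device tree node that would bind against the table above; the
 * unit address, register size, and mailbox phandle are illustrative only:
 *
 *	crypto@612d0000 {
 *		compatible = "brcm,spu2-crypto";
 *		reg = <0x612d0000 0x1000>;
 *		mboxes = <&pdc0 0>;
 *	};
 */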
4739
4740 static int spu_dt_read(struct platform_device *pdev)
4741 {
4742 struct device *dev = &pdev->dev;
4743 struct spu_hw *spu = &iproc_priv.spu;
4744 struct resource *spu_ctrl_regs;
4745 const struct spu_type_subtype *matched_spu_type;
4746 struct device_node *dn = pdev->dev.of_node;
4747 int err, i;
4748
4749 /* Count the number of mailbox channels described in the device tree */
4750 err = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
if (err <= 0) {
	dev_err(dev, "Failed to count \"mboxes\" phandles: %d\n", err);
	return err ? err : -ENODEV;
}
spu->num_chan = err;
4751
4752 matched_spu_type = of_device_get_match_data(dev);
4753 if (!matched_spu_type) {
4754 dev_err(&pdev->dev, "Failed to match device\n");
4755 return -ENODEV;
4756 }
4757
4758 spu->spu_type = matched_spu_type->type;
4759 spu->spu_subtype = matched_spu_type->subtype;
4760
4761 for (i = 0; i < MAX_SPUS; i++) {
4762 spu_ctrl_regs = platform_get_resource(pdev, IORESOURCE_MEM, i);
4763 if (!spu_ctrl_regs)
4764 break;
4765 spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
4766 if (IS_ERR(spu->reg_vbase[i])) {
4767 err = PTR_ERR(spu->reg_vbase[i]);
4768 dev_err(&pdev->dev, "Failed to map registers: %d\n",
4769 err);
4770 spu->reg_vbase[i] = NULL;
4771 return err;
4772 }
4773 }
4774 spu->num_spu = i;
4775 dev_dbg(dev, "Device has %d SPUs\n", spu->num_spu);
4776
4777 return 0;
4778 }
4779
4780 static int bcm_spu_probe(struct platform_device *pdev)
4781 {
4782 struct device *dev = &pdev->dev;
4783 struct spu_hw *spu = &iproc_priv.spu;
4784 int err = 0;
4785
4786 iproc_priv.pdev = pdev;
4787 platform_set_drvdata(iproc_priv.pdev,
4788 &iproc_priv);
4789
4790 err = spu_dt_read(pdev);
4791 if (err < 0)
4792 goto failure;
4793
4794 err = spu_mb_init(&pdev->dev);
4795 if (err < 0)
4796 goto failure;
4797
4798 if (spu->spu_type == SPU_TYPE_SPUM)
4799 iproc_priv.bcm_hdr_len = 8;
4800 else if (spu->spu_type == SPU_TYPE_SPU2)
4801 iproc_priv.bcm_hdr_len = 0;
4802
4803 spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);
4804
4805 spu_counters_init();
4806
4807 spu_setup_debugfs();
4808
4809 err = spu_algs_register(dev);
4810 if (err < 0)
4811 goto fail_reg;
4812
4813 return 0;
4814
4815 fail_reg:
4816 spu_free_debugfs();
4817 failure:
4818 spu_mb_release(pdev);
4819 dev_err(dev, "%s failed with error %d.\n", __func__, err);
4820
4821 return err;
4822 }
4823
4824 static int bcm_spu_remove(struct platform_device *pdev)
4825 {
4826 int i;
4827 struct device *dev = &pdev->dev;
4828 char *cdn;
4829
4830 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4831 /*
4832  * Depending on whether the hardware is SPU-M or SPU2, not every
4833  * algorithm in driver_algs was registered at probe time, so only
4834  * unregister the ones that actually succeeded.
4835  */
4836 if (!driver_algs[i].registered)
4837 continue;
4838
4839 switch (driver_algs[i].type) {
4840 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4841 crypto_unregister_alg(&driver_algs[i].alg.crypto);
4842 dev_dbg(dev, " unregistered cipher %s\n",
4843 driver_algs[i].alg.crypto.cra_driver_name);
4844 driver_algs[i].registered = false;
4845 break;
4846 case CRYPTO_ALG_TYPE_AHASH:
4847 crypto_unregister_ahash(&driver_algs[i].alg.hash);
4848 cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
4849 dev_dbg(dev, " unregistered hash %s\n", cdn);
4850 driver_algs[i].registered = false;
4851 break;
4852 case CRYPTO_ALG_TYPE_AEAD:
4853 crypto_unregister_aead(&driver_algs[i].alg.aead);
4854 dev_dbg(dev, " unregistered aead %s\n",
4855 driver_algs[i].alg.aead.base.cra_driver_name);
4856 driver_algs[i].registered = false;
4857 break;
4858 }
4859 }
4860 spu_free_debugfs();
4861 spu_mb_release(pdev);
4862 return 0;
4863 }
4864
4865
4866
4867 static struct platform_driver bcm_spu_pdriver = {
4868 .driver = {
4869 .name = "brcm-spu-crypto",
4870 .of_match_table = of_match_ptr(bcm_spu_dt_ids),
4871 },
4872 .probe = bcm_spu_probe,
4873 .remove = bcm_spu_remove,
4874 };
4875 module_platform_driver(bcm_spu_pdriver);
4876
4877 MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
4878 MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
4879 MODULE_LICENSE("GPL v2");