This source file includes the following definitions:
- crypto4xx_hw_init
- crypto4xx_alloc_sa
- crypto4xx_free_sa
- crypto4xx_build_pdr
- crypto4xx_destroy_pdr
- crypto4xx_get_pd_from_pdr_nolock
- crypto4xx_put_pd_to_pdr
- crypto4xx_build_gdr
- crypto4xx_destroy_gdr
- crypto4xx_get_n_gd
- crypto4xx_put_gd_to_gdr
- crypto4xx_get_gdp
- crypto4xx_build_sdr
- crypto4xx_destroy_sdr
- crypto4xx_get_n_sd
- crypto4xx_put_sd_to_sdr
- crypto4xx_get_sdp
- crypto4xx_copy_pkt_to_dst
- crypto4xx_copy_digest_to_dst
- crypto4xx_ret_sg_desc
- crypto4xx_cipher_done
- crypto4xx_ahash_done
- crypto4xx_aead_done
- crypto4xx_pd_done
- crypto4xx_stop_all
- get_next_gd
- get_next_sd
- crypto4xx_build_pd
- crypto4xx_ctx_init
- crypto4xx_sk_init
- crypto4xx_common_exit
- crypto4xx_sk_exit
- crypto4xx_aead_init
- crypto4xx_aead_exit
- crypto4xx_register_alg
- crypto4xx_unregister_alg
- crypto4xx_bh_tasklet_cb
- crypto4xx_interrupt_handler
- crypto4xx_ce_interrupt_handler
- crypto4xx_ce_interrupt_handler_revb
- ppc4xx_prng_data_read
- crypto4xx_prng_generate
- crypto4xx_prng_seed
- crypto4xx_probe
- crypto4xx_remove
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
#include "crypto4xx_trng.h"

#define PPC4XX_SEC_VERSION_STR "0.5"

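/**
 * PPC4xx Crypto Engine Initialization Routine
 */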
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
	union ce_ring_size ring_size;
	union ce_ring_control ring_ctrl;
	union ce_part_ring_size part_ring_size;
	union ce_io_threshold io_threshold;
	u32 rand_num;
	union ce_pe_dma_cfg pe_dma_cfg;
	u32 device_ctrl;

	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
	/* setup pe dma and assert the SG/PDR/PE resets */
	pe_dma_cfg.w = 0;
	pe_dma_cfg.bf.bo_sgpd_en = 1;
	pe_dma_cfg.bf.bo_data_en = 0;
	pe_dma_cfg.bf.bo_sa_en = 1;
	pe_dma_cfg.bf.bo_pd_en = 1;
	pe_dma_cfg.bf.dynamic_sa_en = 1;
	pe_dma_cfg.bf.reset_sg = 1;
	pe_dma_cfg.bf.reset_pdr = 1;
	pe_dma_cfg.bf.reset_pe = 1;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* release the resets, keep the packet engine disabled for now */
	pe_dma_cfg.bf.pe_mode = 0;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
	/* seed the hardware PRNG */
	writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
	ring_size.w = 0;
	ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
	ring_size.bf.ring_size = PPC4XX_NUM_PD;
	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
	ring_ctrl.w = 0;
	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	device_ctrl |= PPC4XX_DC_3DES_EN;
	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
	part_ring_size.w = 0;
	part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
	part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
	writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
	writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
	io_threshold.w = 0;
	io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
	io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
	writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
	/* the engine only uses 32-bit addresses: clear the upper halves */
	writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
	/* now turn the packet engine on */
	pe_dma_cfg.bf.pe_mode = 1;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* clear pending interrupts and set the interrupt thresholds */
	writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
	if (dev->is_revb) {
		writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
		       dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
		writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
		       dev->ce_base + CRYPTO4XX_INT_EN);
	} else {
		writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
	}
}

int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
	/* "size" is the SA length in 32-bit words */
	ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
	if (ctx->sa_in == NULL)
		return -ENOMEM;

	ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
	if (ctx->sa_out == NULL) {
		kfree(ctx->sa_in);
		ctx->sa_in = NULL;
		return -ENOMEM;
	}

	ctx->sa_len = size;

	return 0;
}

void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
	kfree(ctx->sa_in);
	ctx->sa_in = NULL;
	kfree(ctx->sa_out);
	ctx->sa_out = NULL;
	ctx->sa_len = 0;
}

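/*
 * Allocate the packet descriptor ring (PDR) along with its per-entry
 * host bookkeeping (pdr_uinfo) and the shadow SA/state-record pools
 * that back each descriptor.
 */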
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
	int i;

	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				      &dev->pdr_pa, GFP_ATOMIC);
	if (!dev->pdr)
		return -ENOMEM;

	dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
				 GFP_KERNEL);
	if (!dev->pdr_uinfo) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr,
				  dev->pdr_pa);
		return -ENOMEM;
	}
	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
				sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				&dev->shadow_sa_pool_pa,
				GFP_ATOMIC);
	if (!dev->shadow_sa_pool)
		return -ENOMEM;

	dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
				sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
				&dev->shadow_sr_pool_pa, GFP_ATOMIC);
	if (!dev->shadow_sr_pool)
		return -ENOMEM;
	for (i = 0; i < PPC4XX_NUM_PD; i++) {
		struct ce_pd *pd = &dev->pdr[i];
		struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];

		pd->sa = dev->shadow_sa_pool_pa +
			sizeof(union shadow_sa_buf) * i;

		/* per-PD shadow SA */
		pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;

		/* per-PD state record */
		pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
			sizeof(struct sa_state_record) * i;
	}

	return 0;
}

static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
	if (dev->pdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr, dev->pdr_pa);

	if (dev->shadow_sa_pool)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);

	if (dev->shadow_sr_pool)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
				  dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

	kfree(dev->pdr_uinfo);
}

static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
	u32 retval;
	u32 tmp;

	retval = dev->pdr_head;
	tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;

	if (tmp == dev->pdr_tail)
		return ERING_WAS_FULL;

	dev->pdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
	u32 tail;
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	pd_uinfo->state = PD_ENTRY_FREE;

	if (dev->pdr_tail != PPC4XX_LAST_PD)
		dev->pdr_tail++;
	else
		dev->pdr_tail = 0;
	tail = dev->pdr_tail;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return tail;
}

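/*
 * Allocate the gather descriptor ring (GDR). The descriptors point at
 * the source data, so no buffers need to be allocated for the ring
 * itself.
 */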
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
	dev->gdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
				      &dev->gdr_pa, GFP_ATOMIC);
	if (!dev->gdr)
		return -ENOMEM;

	return 0;
}

static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
	dma_free_coherent(dev->core_dev->device,
			  sizeof(struct ce_gd) * PPC4XX_NUM_GD,
			  dev->gdr, dev->gdr_pa);
}

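/*
 * Reserve n consecutive gather descriptors. Returns the index of the
 * first one, or ERING_WAS_FULL. The caller must hold core_dev->lock.
 */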
static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_GD)
		return ERING_WAS_FULL;

	retval = dev->gdr_head;
	tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
	if (dev->gdr_head > dev->gdr_tail) {
		if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->gdr_head < dev->gdr_tail) {
		if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	}
	dev->gdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->gdr_tail == dev->gdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}

	if (dev->gdr_tail != PPC4XX_LAST_GD)
		dev->gdr_tail++;
	else
		dev->gdr_tail = 0;

	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
					      dma_addr_t *gd_dma, u32 idx)
{
	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

	return &dev->gdr[idx];
}

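/*
 * Allocate the scatter descriptor ring (SDR) plus one contiguous block
 * of PPC4XX_SD_BUFFER_SIZE buffers that the descriptors point into.
 */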
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
	int i;

	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				      &dev->sdr_pa, GFP_ATOMIC);
	if (!dev->sdr)
		return -ENOMEM;

	dev->scatter_buffer_va =
		dma_alloc_coherent(dev->core_dev->device,
				   PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
				   &dev->scatter_buffer_pa, GFP_ATOMIC);
	if (!dev->scatter_buffer_va)
		return -ENOMEM;

	for (i = 0; i < PPC4XX_NUM_SD; i++) {
		dev->sdr[i].ptr = dev->scatter_buffer_pa +
				  PPC4XX_SD_BUFFER_SIZE * i;
	}

	return 0;
}

static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
	if (dev->sdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);

	if (dev->scatter_buffer_va)
		dma_free_coherent(dev->core_dev->device,
				  PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
				  dev->scatter_buffer_va,
				  dev->scatter_buffer_pa);
}

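/*
 * Reserve n consecutive scatter descriptors. Returns the index of the
 * first one, or ERING_WAS_FULL. The caller must hold core_dev->lock.
 */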
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_SD)
		return ERING_WAS_FULL;

	retval = dev->sdr_head;
	tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
	if (dev->sdr_head > dev->sdr_tail) {
		if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->sdr_head < dev->sdr_tail) {
		if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	}
	dev->sdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->sdr_tail == dev->sdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}
	if (dev->sdr_tail != PPC4XX_LAST_SD)
		dev->sdr_tail++;
	else
		dev->sdr_tail = 0;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
					      dma_addr_t *sd_dma, u32 idx)
{
	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

	return &dev->sdr[idx];
}

static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
				      struct ce_pd *pd,
				      struct pd_uinfo *pd_uinfo,
				      u32 nbytes,
				      struct scatterlist *dst)
{
	unsigned int first_sd = pd_uinfo->first_sd;
	unsigned int last_sd;
	unsigned int overflow = 0;
	unsigned int to_copy;
	unsigned int dst_start = 0;

	/*
	 * Because the scatter buffers are all neatly organized in one
	 * big continuous ringbuffer, scatterwalk_map_and_copy() can
	 * be instructed to copy a range of buffers in one go. A wrap
	 * past the last SD is handled as a second copy.
	 */
	last_sd = (first_sd + pd_uinfo->num_sd);
	if (last_sd > PPC4XX_LAST_SD) {
		last_sd = PPC4XX_LAST_SD;
		overflow = last_sd % PPC4XX_NUM_SD;
	}

	while (nbytes) {
		void *buf = dev->scatter_buffer_va +
			first_sd * PPC4XX_SD_BUFFER_SIZE;

		to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
				      (1 + last_sd - first_sd));
		scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
		nbytes -= to_copy;

		if (overflow) {
			first_sd = 0;
			last_sd = overflow;
			dst_start += to_copy;
			overflow = 0;
		}
	}
}

static void crypto4xx_copy_digest_to_dst(void *dst,
					 struct pd_uinfo *pd_uinfo,
					 struct crypto4xx_ctx *ctx)
{
	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;

	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
		memcpy(dst, pd_uinfo->sr_va->save_digest,
		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
	}
}

static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo)
{
	int i;

	if (pd_uinfo->num_gd) {
		for (i = 0; i < pd_uinfo->num_gd; i++)
			crypto4xx_put_gd_to_gdr(dev);
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (pd_uinfo->num_sd) {
		for (i = 0; i < pd_uinfo->num_sd; i++)
			crypto4xx_put_sd_to_sdr(dev);

		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
	}
}

static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo,
				  struct ce_pd *pd)
{
	struct skcipher_request *req;
	struct scatterlist *dst;
	dma_addr_t addr;

	req = skcipher_request_cast(pd_uinfo->async_req);

	if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
					  req->cryptlen, req->dst);
	} else {
		dst = pd_uinfo->dest_va;
		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
				    dst->offset, dst->length, DMA_FROM_DEVICE);
	}

	if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
		struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);

		crypto4xx_memcpy_from_le32((u32 *)req->iv,
			pd_uinfo->sr_va->save_iv,
			crypto_skcipher_ivsize(skcipher));
	}

	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		skcipher_request_complete(req, -EINPROGRESS);
	skcipher_request_complete(req, 0);
}

static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
				 struct pd_uinfo *pd_uinfo)
{
	struct crypto4xx_ctx *ctx;
	struct ahash_request *ahash_req;

	ahash_req = ahash_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ahash_req->base.tfm);

	crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		ahash_request_complete(ahash_req, -EINPROGRESS);
	ahash_request_complete(ahash_req, 0);
}

static void crypto4xx_aead_done(struct crypto4xx_device *dev,
				struct pd_uinfo *pd_uinfo,
				struct ce_pd *pd)
{
	struct aead_request *aead_req = container_of(pd_uinfo->async_req,
		struct aead_request, base);
	struct scatterlist *dst = pd_uinfo->dest_va;
	size_t cp_len = crypto_aead_authsize(
		crypto_aead_reqtfm(aead_req));
	u32 icv[AES_BLOCK_SIZE];
	int err = 0;

	if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
					  pd->pd_ctl_len.bf.pkt_len,
					  dst);
	} else {
		dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
			       DMA_FROM_DEVICE);
	}

	if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
		/* append icv at the end */
		crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
					   sizeof(icv));

		scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
					 cp_len, 1);
	} else {
		/* check icv at the end */
		scatterwalk_map_and_copy(icv, aead_req->src,
			aead_req->assoclen + aead_req->cryptlen -
			cp_len, cp_len, 0);

		crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));

		if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
			err = -EBADMSG;
	}

	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd->pd_ctl.bf.status & 0xff) {
		if (!__ratelimit(&dev->aead_ratelimit)) {
			if (pd->pd_ctl.bf.status & 2)
				pr_err("pad fail error\n");
			if (pd->pd_ctl.bf.status & 4)
				pr_err("seqnum fail\n");
			if (pd->pd_ctl.bf.status & 8)
				pr_err("error notify\n");
			pr_err("aead return err status = 0x%02x\n",
			       pd->pd_ctl.bf.status & 0xff);
			pr_err("pd pad_ctl = 0x%08x\n",
			       pd->pd_ctl.bf.pd_pad_ctl);
		}
		err = -EINVAL;
	}

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		aead_request_complete(aead_req, -EINPROGRESS);

	aead_request_complete(aead_req, err);
}

static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
	struct ce_pd *pd = &dev->pdr[idx];
	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];

	switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		crypto4xx_cipher_done(dev, pd_uinfo, pd);
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto4xx_aead_done(dev, pd_uinfo, pd);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		crypto4xx_ahash_done(dev, pd_uinfo);
		break;
	}
}

static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
	crypto4xx_destroy_pdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
	crypto4xx_destroy_sdr(core_dev->dev);
	iounmap(core_dev->dev->ce_base);
	kfree(core_dev->dev);
	kfree(core_dev);
}

static u32 get_next_gd(u32 current)
{
	if (current != PPC4XX_LAST_GD)
		return current + 1;
	else
		return 0;
}

static u32 get_next_sd(u32 current)
{
	if (current != PPC4XX_LAST_SD)
		return current + 1;
	else
		return 0;
}

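/*
 * crypto4xx_build_pd() is the central request builder: it reserves the
 * gather/scatter/packet descriptors, copies the request SA into the
 * shadow SA, maps source and destination and finally kicks the packet
 * engine.
 */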
int crypto4xx_build_pd(struct crypto_async_request *req,
		       struct crypto4xx_ctx *ctx,
		       struct scatterlist *src,
		       struct scatterlist *dst,
		       const unsigned int datalen,
		       const __le32 *iv, const u32 iv_len,
		       const struct dynamic_sa_ctl *req_sa,
		       const unsigned int sa_len,
		       const unsigned int assoclen,
		       struct scatterlist *_dst)
{
	struct crypto4xx_device *dev = ctx->dev;
	struct dynamic_sa_ctl *sa;
	struct ce_gd *gd;
	struct ce_pd *pd;
	u32 num_gd, num_sd;
	u32 fst_gd = 0xffffffff;
	u32 fst_sd = 0xffffffff;
	u32 pd_entry;
	unsigned long flags;
	struct pd_uinfo *pd_uinfo;
	unsigned int nbytes = datalen;
	size_t offset_to_sr_ptr;
	u32 gd_idx = 0;
	int tmp;
	bool is_busy, force_sd;

	/*
	 * There is a subtle hardware quirk, indirectly mentioned in
	 * section 18.1.3.5 (Encryption/Decryption) of the hardware
	 * spec: the AES/(T)DES OFB and CFB modes are listed as
	 * operation modes for block ciphers. To stop the hardware from
	 * overrunning the destination buffer on ciphertexts that are
	 * not a multiple of 16 (AES_BLOCK_SIZE), force the use of the
	 * scatter buffers.
	 */
	force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
		|| req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
		&& (datalen % AES_BLOCK_SIZE);

	/* figure how many gd are needed */
	tmp = sg_nents_for_len(src, assoclen + datalen);
	if (tmp < 0) {
		dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
		return tmp;
	}
	if (tmp == 1)
		tmp = 0;
	num_gd = tmp;

	if (assoclen) {
		nbytes += assoclen;
		dst = scatterwalk_ffwd(_dst, dst, assoclen);
	}

	/* figure how many sd are needed */
	if (sg_is_last(dst) && !force_sd) {
		num_sd = 0;
	} else {
		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
			num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
			if (datalen % PPC4XX_SD_BUFFER_SIZE)
				num_sd++;
		} else {
			num_sd = 1;
		}
	}

	/*
	 * The following section must run under the lock: the gather and
	 * scatter descriptors have to be reserved consecutively, and if
	 * any kind of descriptor runs out, the ones already taken must
	 * be returned to their original place.
	 */
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	/*
	 * Let the caller know to slow down once more than 13/16ths (81%)
	 * of the available data contexts are in use. With PPC4XX_NUM_PD
	 * = 256 this leaves a backlog queue for 31 more contexts before
	 * new requests have to be rejected.
	 */
	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
			((PPC4XX_NUM_PD * 13) / 16);
	} else {
		/*
		 * To avoid contention between users with a backlog (e.g.
		 * dm-crypt) and users without one (e.g. ipsec), keep
		 * entries up to the 15/16ths mark available for the
		 * "no backlog" contexts.
		 */
		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
			((PPC4XX_NUM_PD * 15) / 16);

		if (is_busy) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EBUSY;
		}
	}

	if (num_gd) {
		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
		if (fst_gd == ERING_WAS_FULL) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	if (num_sd) {
		fst_sd = crypto4xx_get_n_sd(dev, num_sd);
		if (fst_sd == ERING_WAS_FULL) {
			if (num_gd)
				dev->gdr_head = fst_gd;
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
	if (pd_entry == ERING_WAS_FULL) {
		if (num_gd)
			dev->gdr_head = fst_gd;
		if (num_sd)
			dev->sdr_head = fst_sd;
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	pd = &dev->pdr[pd_entry];
	pd->sa_len = sa_len;

	pd_uinfo = &dev->pdr_uinfo[pd_entry];
	pd_uinfo->num_gd = num_gd;
	pd_uinfo->num_sd = num_sd;
	pd_uinfo->dest_va = dst;
	pd_uinfo->async_req = req;

	if (iv_len)
		memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);

	sa = pd_uinfo->sa_va;
	memcpy(sa, req_sa, sa_len * 4);

	sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
	offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
	*(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;

	if (num_gd) {
		dma_addr_t gd_dma;
		struct scatterlist *sg;

		/* get first gd we are going to use */
		gd_idx = fst_gd;
		pd_uinfo->first_gd = fst_gd;
		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
		pd->src = gd_dma;
		/* enable gather */
		sa->sa_command_0.bf.gather = 1;
		/* walk the sg, and setup gather array */
		sg = src;
		while (nbytes) {
			size_t len;

			len = min(sg->length, nbytes);
			gd->ptr = dma_map_page(dev->core_dev->device,
				sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
			gd->ctl_len.len = len;
			gd->ctl_len.done = 0;
			gd->ctl_len.ready = 1;
			if (len >= nbytes)
				break;

			nbytes -= sg->length;
			gd_idx = get_next_gd(gd_idx);
			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
			sg = sg_next(sg);
		}
	} else {
		pd->src = (u32)dma_map_page(dev->core_dev->device,
				sg_page(src), src->offset,
				min(nbytes, src->length),
				DMA_TO_DEVICE);
		/* disable gather in sa command */
		sa->sa_command_0.bf.gather = 0;
		/* indicate gather array is not used */
		pd_uinfo->first_gd = 0xffffffff;
	}
	if (!num_sd) {
		/*
		 * the application gave us one whole piece of dst memory,
		 * no need to use the scatter ring
		 */
		pd_uinfo->first_sd = 0xffffffff;
		sa->sa_command_0.bf.scatter = 0;
		pd->dest = (u32)dma_map_page(dev->core_dev->device,
					     sg_page(dst), dst->offset,
					     min(datalen, dst->length),
					     DMA_TO_DEVICE);
	} else {
		dma_addr_t sd_dma;
		struct ce_sd *sd = NULL;
		u32 sd_idx = fst_sd;

		nbytes = datalen;
		sa->sa_command_0.bf.scatter = 1;
		pd_uinfo->first_sd = fst_sd;
		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
		pd->dest = sd_dma;
		/* setup scatter descriptor; sd->ptr was set by build_sdr */
		sd->ctl.done = 0;
		sd->ctl.rdy = 1;

		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
			nbytes -= PPC4XX_SD_BUFFER_SIZE;
		else
			nbytes = 0;
		while (nbytes) {
			sd_idx = get_next_sd(sd_idx);
			sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
			/* setup scatter descriptor */
			sd->ctl.done = 0;
			sd->ctl.rdy = 1;
			if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
				nbytes -= PPC4XX_SD_BUFFER_SIZE;
			} else {
				/*
				 * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
				 * which is more than nbytes, so done.
				 */
				nbytes = 0;
			}
		}
	}

	pd->pd_ctl.w = PD_CTL_HOST_READY |
		((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
		 (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
			PD_CTL_HASH_FINAL : 0);
	pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
	pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);

	wmb();
	/* write any value to push engine to read a pd */
	writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	return is_busy ? -EBUSY : -EINPROGRESS;
}

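/**
 * Algorithm Registration Functions
 */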
static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
			       struct crypto4xx_ctx *ctx)
{
	ctx->dev = amcc_alg->dev;
	ctx->sa_in = NULL;
	ctx->sa_out = NULL;
	ctx->sa_len = 0;
}

static int crypto4xx_sk_init(struct crypto_skcipher *sk)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(sk);
	struct crypto4xx_alg *amcc_alg;
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);

	if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->sw_cipher.cipher =
			crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->sw_cipher.cipher))
			return PTR_ERR(ctx->sw_cipher.cipher);
	}

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
	crypto4xx_ctx_init(amcc_alg, ctx);
	return 0;
}

static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
{
	crypto4xx_free_sa(ctx);
}

static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
{
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);

	crypto4xx_common_exit(ctx);
	if (ctx->sw_cipher.cipher)
		crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
}

static int crypto4xx_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto4xx_alg *amcc_alg;

	ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
						CRYPTO_ALG_NEED_FALLBACK |
						CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->sw_cipher.aead))
		return PTR_ERR(ctx->sw_cipher.aead);

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
	crypto4xx_ctx_init(amcc_alg, ctx);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
				crypto_aead_reqsize(ctx->sw_cipher.aead),
				sizeof(struct crypto4xx_aead_reqctx)));
	return 0;
}

static void crypto4xx_aead_exit(struct crypto_aead *tfm)
{
	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);

	crypto4xx_common_exit(ctx);
	crypto_free_aead(ctx->sw_cipher.aead);
}

static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
				  struct crypto4xx_alg_common *crypto_alg,
				  int array_size)
{
	struct crypto4xx_alg *alg;
	int i;
	int rc = 0;

	for (i = 0; i < array_size; i++) {
		alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
		if (!alg)
			return -ENOMEM;

		alg->alg = crypto_alg[i];
		alg->dev = sec_dev;

		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AEAD:
			rc = crypto_register_aead(&alg->alg.u.aead);
			break;

		case CRYPTO_ALG_TYPE_AHASH:
			rc = crypto_register_ahash(&alg->alg.u.hash);
			break;

		case CRYPTO_ALG_TYPE_RNG:
			rc = crypto_register_rng(&alg->alg.u.rng);
			break;

		default:
			rc = crypto_register_skcipher(&alg->alg.u.cipher);
			break;
		}

		if (rc)
			kfree(alg);
		else
			list_add_tail(&alg->entry, &sec_dev->alg_list);
	}

	return 0;
}

static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
	struct crypto4xx_alg *alg, *tmp;

	list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
		list_del(&alg->entry);
		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&alg->alg.u.hash);
			break;

		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&alg->alg.u.aead);
			break;

		case CRYPTO_ALG_TYPE_RNG:
			crypto_unregister_rng(&alg->alg.u.rng);
			break;

		default:
			crypto_unregister_skcipher(&alg->alg.u.cipher);
		}
		kfree(alg);
	}
}

static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
	struct pd_uinfo *pd_uinfo;
	struct ce_pd *pd;
	u32 tail = core_dev->dev->pdr_tail;
	u32 head = core_dev->dev->pdr_head;

	do {
		pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
		pd = &core_dev->dev->pdr[tail];
		if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
		     ((READ_ONCE(pd->pd_ctl.w) &
		       (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
		       PD_CTL_PE_DONE)) {
			crypto4xx_pd_done(core_dev->dev, tail);
			tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
		} else {
			/* if tail not done, break */
			break;
		}
	} while (head != tail);
}

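/**
 * Top Half of isr: acknowledge the interrupt and defer ring processing
 * to the tasklet.
 */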
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
						      u32 clr_val)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
	tasklet_schedule(&core_dev->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
}

static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
{
	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
					   PPC4XX_TMO_ERR_INT);
}

static int ppc4xx_prng_data_read(struct crypto4xx_device *dev,
				 u8 *data, unsigned int max)
{
	unsigned int i, curr = 0;
	u32 val[2];

	do {
		/* trigger PRN generation */
		writel(PPC4XX_PRNG_CTRL_AUTO_EN,
		       dev->ce_base + CRYPTO4XX_PRNG_CTRL);

		for (i = 0; i < 1024; i++) {
			/* wait until the engine is no longer busy */
			if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) &
			     CRYPTO4XX_PRNG_STAT_BUSY))
				continue;

			val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0);
			val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1);
			break;
		}
		if (i == 1024)
			return -ETIMEDOUT;

		if ((max - curr) >= 8) {
			memcpy(data, &val, 8);
			data += 8;
			curr += 8;
		} else {
			/* copy only remaining bytes */
			memcpy(data, &val, max - curr);
			break;
		}
	} while (curr < max);

	return curr;
}

static int crypto4xx_prng_generate(struct crypto_rng *tfm,
				   const u8 *src, unsigned int slen,
				   u8 *dstn, unsigned int dlen)
{
	struct rng_alg *alg = crypto_rng_alg(tfm);
	struct crypto4xx_alg *amcc_alg;
	struct crypto4xx_device *dev;
	int ret;

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng);
	dev = amcc_alg->dev;

	mutex_lock(&dev->core_dev->rng_lock);
	ret = ppc4xx_prng_data_read(dev, dstn, dlen);
	mutex_unlock(&dev->core_dev->rng_lock);
	return ret;
}

static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
			       unsigned int slen)
{
	/* the hardware PRNG is seeded by crypto4xx_hw_init() */
	return 0;
}

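/**
 * Supported Crypto Algorithms
 */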
static struct crypto4xx_alg_common crypto4xx_alg[] = {
	/* Crypto AES modes */
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_cbc,
		.encrypt = crypto4xx_encrypt_iv_block,
		.decrypt = crypto4xx_decrypt_iv_block,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "cfb(aes)",
			.cra_driver_name = "cfb-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_cfb,
		.encrypt = crypto4xx_encrypt_iv_stream,
		.decrypt = crypto4xx_decrypt_iv_stream,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_ctr,
		.encrypt = crypto4xx_encrypt_ctr,
		.decrypt = crypto4xx_decrypt_ctr,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "rfc3686(ctr(aes))",
			.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
		.ivsize = CTR_RFC3686_IV_SIZE,
		.setkey = crypto4xx_setkey_rfc3686,
		.encrypt = crypto4xx_rfc3686_encrypt,
		.decrypt = crypto4xx_rfc3686_decrypt,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = crypto4xx_setkey_aes_ecb,
		.encrypt = crypto4xx_encrypt_noiv_block,
		.decrypt = crypto4xx_decrypt_noiv_block,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ofb(aes)",
			.cra_driver_name = "ofb-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_ofb,
		.encrypt = crypto4xx_encrypt_iv_stream,
		.decrypt = crypto4xx_decrypt_iv_stream,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },

	/* AEAD */
	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
		.setkey = crypto4xx_setkey_aes_ccm,
		.setauthsize = crypto4xx_setauthsize_aead,
		.encrypt = crypto4xx_encrypt_aes_ccm,
		.decrypt = crypto4xx_decrypt_aes_ccm,
		.init = crypto4xx_aead_init,
		.exit = crypto4xx_aead_exit,
		.ivsize	= AES_BLOCK_SIZE,
		.maxauthsize = 16,
		.base = {
			.cra_name = "ccm(aes)",
			.cra_driver_name = "ccm-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
	} },
	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
		.setkey = crypto4xx_setkey_aes_gcm,
		.setauthsize = crypto4xx_setauthsize_aead,
		.encrypt = crypto4xx_encrypt_aes_gcm,
		.decrypt = crypto4xx_decrypt_aes_gcm,
		.init = crypto4xx_aead_init,
		.exit = crypto4xx_aead_exit,
		.ivsize	= GCM_AES_IV_SIZE,
		.maxauthsize = 16,
		.base = {
			.cra_name = "gcm(aes)",
			.cra_driver_name = "gcm-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
	} },
	{ .type = CRYPTO_ALG_TYPE_RNG, .u.rng = {
		.base = {
			.cra_name = "stdrng",
			.cra_driver_name = "crypto4xx_rng",
			.cra_priority = 300,
			.cra_ctxsize = 0,
			.cra_module = THIS_MODULE,
		},
		.generate = crypto4xx_prng_generate,
		.seed = crypto4xx_prng_seed,
		.seedsize = 0,
	} },
};

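/**
 * Module Initialization Routine
 */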
static int crypto4xx_probe(struct platform_device *ofdev)
{
	int rc;
	struct resource res;
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev;
	u32 pvr;
	bool is_revb = true;

	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
	if (rc)
		return -ENODEV;

	if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc405ex-crypto")) {
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
		is_revb = false;
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc460sx-crypto")) {
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
	} else {
		printk(KERN_ERR "Crypto Function Not supported!\n");
		return -EINVAL;
	}

	core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
	if (!core_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, core_dev);
	core_dev->ofdev = ofdev;
	core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
	rc = -ENOMEM;
	if (!core_dev->dev)
		goto err_alloc_dev;

	/*
	 * Older versions of the 460EX/GT have a hardware bug and do not
	 * support H/W based security interrupt coalescing.
	 */
	pvr = mfspr(SPRN_PVR);
	if (is_revb && ((pvr >> 4) == 0x130218A)) {
		u32 min = PVR_MIN(pvr);

		if (min < 4) {
			dev_info(dev, "RevA detected - disable interrupt coalescing\n");
			is_revb = false;
		}
	}

	core_dev->dev->core_dev = core_dev;
	core_dev->dev->is_revb = is_revb;
	core_dev->device = dev;
	mutex_init(&core_dev->rng_lock);
	spin_lock_init(&core_dev->lock);
	INIT_LIST_HEAD(&core_dev->dev->alg_list);
	ratelimit_default_init(&core_dev->dev->aead_ratelimit);
	rc = crypto4xx_build_pdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_gdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_sdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;

	/* Init tasklet for bottom half processing */
	tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
		     (unsigned long) dev);

	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
	if (!core_dev->dev->ce_base) {
		dev_err(dev, "failed to of_iomap\n");
		rc = -ENOMEM;
		goto err_iomap;
	}

	/* Register for Crypto isr, Crypto Engine IRQ */
	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	rc = request_irq(core_dev->irq, is_revb ?
			 crypto4xx_ce_interrupt_handler_revb :
			 crypto4xx_ce_interrupt_handler, 0,
			 KBUILD_MODNAME, dev);
	if (rc)
		goto err_request_irq;

	/* need to setup pdr, rdr, gdr and sdr before this */
	crypto4xx_hw_init(core_dev->dev);

	/* Register security algorithms with Linux CryptoAPI */
	rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
				    ARRAY_SIZE(crypto4xx_alg));
	if (rc)
		goto err_start_dev;

	ppc4xx_trng_probe(core_dev);
	return 0;

err_start_dev:
	free_irq(core_dev->irq, dev);
err_request_irq:
	irq_dispose_mapping(core_dev->irq);
	iounmap(core_dev->dev->ce_base);
err_iomap:
	tasklet_kill(&core_dev->tasklet);
err_build_sdr:
	crypto4xx_destroy_sdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
err_build_pdr:
	crypto4xx_destroy_pdr(core_dev->dev);
	kfree(core_dev->dev);
err_alloc_dev:
	kfree(core_dev);

	return rc;
}

static int crypto4xx_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	ppc4xx_trng_remove(core_dev);

	free_irq(core_dev->irq, dev);
	irq_dispose_mapping(core_dev->irq);

	tasklet_kill(&core_dev->tasklet);

	crypto4xx_unregister_alg(core_dev->dev);
	mutex_destroy(&core_dev->rng_lock);

	/* Free all allocated memory */
	crypto4xx_stop_all(core_dev);

	return 0;
}

static const struct of_device_id crypto4xx_match[] = {
	{ .compatible = "amcc,ppc4xx-crypto", },
	{ },
};
MODULE_DEVICE_TABLE(of, crypto4xx_match);

static struct platform_driver crypto4xx_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = crypto4xx_match,
	},
	.probe = crypto4xx_probe,
	.remove = crypto4xx_remove,
};

module_platform_driver(crypto4xx_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");