This source file includes the following definitions:
- sun4i_hash_crainit
- sun4i_hash_init
- sun4i_hash_export_md5
- sun4i_hash_import_md5
- sun4i_hash_export_sha1
- sun4i_hash_import_sha1
- sun4i_hash
- sun4i_hash_final
- sun4i_hash_update
- sun4i_hash_finup
- sun4i_hash_digest

#include "sun4i-ss.h"
#include <linux/scatterlist.h>

#define SS_TIMEOUT 100

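/*
 * sun4i_hash_crainit() runs once per transform: it looks up the matching
 * sun4i_ss_alg_template to find the Security System device and reserves
 * room for the per-request context.
 */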
int sun4i_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun4i_ss_alg_template *algt;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
	op->ss = algt->ss;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun4i_req_ctx));
	return 0;
}

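/*
 * sun4i_hash_init() prepares a fresh request: it clears the request context
 * and records the algorithm mode (MD5 or SHA-1) taken from the template.
 */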
int sun4i_hash_init(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun4i_ss_alg_template *algt;

	memset(op, 0, sizeof(struct sun4i_req_ctx));

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
	op->mode = algt->mode;

	return 0;
}

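/*
 * The export/import helpers serialize the partial hash state into the
 * generic md5_state/sha1_state layout, so a request can be suspended
 * between updates and resumed later.
 */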
int sun4i_hash_export_md5(struct ahash_request *areq, void *out)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct md5_state *octx = out;
	int i;

	octx->byte_count = op->byte_count + op->len;

	memcpy(octx->block, op->buf, op->len);

	if (op->byte_count) {
		for (i = 0; i < 4; i++)
			octx->hash[i] = op->hash[i];
	} else {
		/* nothing hashed yet: export the MD5 initial values */
		octx->hash[0] = MD5_H0;
		octx->hash[1] = MD5_H1;
		octx->hash[2] = MD5_H2;
		octx->hash[3] = MD5_H3;
	}

	return 0;
}

int sun4i_hash_import_md5(struct ahash_request *areq, const void *in)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	const struct md5_state *ictx = in;
	int i;

	sun4i_hash_init(areq);

	op->byte_count = ictx->byte_count & ~0x3F;
	op->len = ictx->byte_count & 0x3F;

	memcpy(op->buf, ictx->block, op->len);

	for (i = 0; i < 4; i++)
		op->hash[i] = ictx->hash[i];

	return 0;
}

int sun4i_hash_export_sha1(struct ahash_request *areq, void *out)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct sha1_state *octx = out;
	int i;

	octx->count = op->byte_count + op->len;

	memcpy(octx->buffer, op->buf, op->len);

	if (op->byte_count) {
		for (i = 0; i < 5; i++)
			octx->state[i] = op->hash[i];
	} else {
		octx->state[0] = SHA1_H0;
		octx->state[1] = SHA1_H1;
		octx->state[2] = SHA1_H2;
		octx->state[3] = SHA1_H3;
		octx->state[4] = SHA1_H4;
	}

	return 0;
}

int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	const struct sha1_state *ictx = in;
	int i;

	sun4i_hash_init(areq);

	op->byte_count = ictx->count & ~0x3F;
	op->len = ictx->count & 0x3F;

	memcpy(op->buf, ictx->buffer, op->len);

	for (i = 0; i < 5; i++)
		op->hash[i] = ictx->state[i];

	return 0;
}

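/* request flags: feed data to the engine (update) and/or finalize the digest (final) */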
#define SS_HASH_UPDATE 1
#define SS_HASH_FINAL 2

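/*
 * sun4i_hash() services update, final, finup and digest requests.
 *
 * The engine cannot keep a partial block across calls, so data is fed in
 * 64-byte multiples and any leftover bytes are parked in op->buf (op->len
 * bytes). The intermediate digest is read back from the MD registers at a
 * 64-byte boundary and restored through the IV registers on the next call.
 * On a final request the remaining bytes are written, the MD5/SHA-1 padding
 * (0x80 byte, zero fill, 64-bit bit length) is appended and the digest is
 * copied to areq->result.
 */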
static int sun4i_hash(struct ahash_request *areq)
{
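	/*
	 * i counts the bytes consumed from the source SGs and is compared
	 * against areq->nbytes; end is the byte position after which nothing
	 * more may be written to the device in this pass; in_i is the offset
	 * inside the SG fragment currently mapped by the miter.
	 */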
	unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo;
	unsigned int in_i = 0;
	u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0;
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct sun4i_ss_ctx *ss = tfmctx->ss;
	struct scatterlist *in_sg = areq->src;
	struct sg_mapping_iter mi;
	int in_r, err = 0;
	size_t copied = 0;
	__le32 wb = 0;

	dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
		__func__, crypto_tfm_alg_name(areq->base.tfm),
		op->byte_count, areq->nbytes, op->mode,
		op->len, op->hash[0]);

	if (unlikely(!areq->nbytes) && !(op->flags & SS_HASH_FINAL))
		return 0;

	/* protect against overflow of op->len + areq->nbytes */
	if (unlikely(areq->nbytes > UINT_MAX - op->len)) {
		dev_err(ss->dev, "Cannot process too large request\n");
		return -EINVAL;
	}

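	/*
	 * No final flag and everything still fits in the 64-byte wait
	 * buffer: just linearize the data into op->buf and return.
	 */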
	if (op->len + areq->nbytes < 64 && !(op->flags & SS_HASH_FINAL)) {
		copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					    op->buf + op->len, areq->nbytes, 0);
		op->len += copied;
		return 0;
	}

	spin_lock_bh(&ss->slock);

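	/*
	 * If some data has already been hashed, reload the saved partial
	 * digest through the IV registers so the engine resumes from it.
	 */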
	if (op->byte_count) {
		ivmode = SS_IV_ARBITRARY;
		for (i = 0; i < 5; i++)
			writel(op->hash[i], ss->base + SS_IV0 + i * 4);
	}

	writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);

	if (!(op->flags & SS_HASH_UPDATE))
		goto hash_final;

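	/*
	 * Compute end, the last byte offset that may be pushed to the device
	 * in this pass: a multiple of 64 when more updates will follow (so
	 * the state can be saved at a block boundary), or a multiple of 4
	 * when this request also finalizes the hash.
	 */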
	if (!(op->flags & SS_HASH_FINAL)) {
		end = ((areq->nbytes + op->len) / 64) * 64 - op->len;

		if (end > areq->nbytes || areq->nbytes - end > 63) {
			dev_err(ss->dev, "ERROR: Bound error %u %u\n",
				end, areq->nbytes);
			err = -EINVAL;
			goto release_ss;
		}
	} else {
		if (areq->nbytes < 4)
			end = 0;
		else
			end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
	}

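	/*
	 * If every SG length is a multiple of 4 the data could be fed by
	 * DMA; for now this is only detected and logged, PIO is always used.
	 */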
	i = 1;
	while (in_sg && i == 1) {
		if (in_sg->length % 4)
			i = 0;
		in_sg = sg_next(in_sg);
	}
	if (i == 1 && !op->len && areq->nbytes)
		dev_dbg(ss->dev, "We can DMA\n");

	i = 0;
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	in_i = 0;

	do {
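		/*
		 * The FIFO takes whole 32-bit words, so go through the wait
		 * buffer whenever it already holds bytes or the current SG
		 * fragment has fewer than 4 bytes left.
		 */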
		if (op->len || (mi.length - in_i) < 4) {
			/* fill the wait buffer up to 64 bytes or up to end */
			while (op->len < 64 && i < end) {
				in_r = min(end - i, 64 - op->len);
				in_r = min_t(size_t, mi.length - in_i, in_r);
				memcpy(op->buf + op->len, mi.addr + in_i, in_r);
				op->len += in_r;
				i += in_r;
				in_i += in_r;
				if (in_i == mi.length) {
					sg_miter_next(&mi);
					in_i = 0;
				}
			}
			if (op->len > 3 && !(op->len % 4)) {
				/* the wait buffer holds whole words: flush it */
				writesl(ss->base + SS_RXFIFO, op->buf,
					op->len / 4);
				op->byte_count += op->len;
				op->len = 0;
			}
		}
		if (mi.length - in_i > 3 && i < end) {
			/* bytes that can go straight from this fragment, whole words only */
			in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i);
			in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r);
			/* words to push now, bounded by end, FIFO space and the fragment */
			todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
			writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
			op->byte_count += todo * 4;
			i += todo * 4;
			in_i += todo * 4;
			rx_cnt -= todo;
			if (!rx_cnt) {
				spaces = readl(ss->base + SS_FCSR);
				rx_cnt = SS_RXFIFO_SPACES(spaces);
			}
			if (in_i == mi.length) {
				sg_miter_next(&mi);
				in_i = 0;
			}
		}
	} while (i < end);

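	/*
	 * Whatever is left (always less than a full 64-byte block) is kept
	 * in op->buf for the next update or for the final padding below.
	 */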
	if ((areq->nbytes - i) < 64) {
		while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
			in_r = min(areq->nbytes - i, 64 - op->len);
			in_r = min_t(size_t, mi.length - in_i, in_r);
			memcpy(op->buf + op->len, mi.addr + in_i, in_r);
			op->len += in_r;
			i += in_r;
			in_i += in_r;
			if (in_i == mi.length) {
				sg_miter_next(&mi);
				in_i = 0;
			}
		}
	}

	sg_miter_stop(&mi);

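	/*
	 * If this request also finalizes the hash, go add the padding now.
	 * Otherwise flush the current state: signal the end of this chunk,
	 * wait for the engine and read the intermediate digest back into
	 * op->hash so it can be restored on the next call.
	 */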
	if (op->flags & SS_HASH_FINAL)
		goto hash_final;

	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
	i = 0;
	do {
		v = readl(ss->base + SS_CTL);
		i++;
	} while (i < SS_TIMEOUT && (v & SS_DATA_END));
	if (unlikely(i >= SS_TIMEOUT)) {
		dev_err_ratelimited(ss->dev,
				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
				    i, SS_TIMEOUT, v, areq->nbytes);
		err = -EIO;
		goto release_ss;
	}

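	/*
	 * SS_DATA_END clearing does not guarantee that the MD registers
	 * already hold the updated digest, so leave the engine a moment
	 * before reading them back.
	 */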
	ndelay(1);

	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
		op->hash[i] = readl(ss->base + SS_MD0 + i * 4);

	goto release_ss;

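	/*
	 * hash_final: push any bytes still waiting in op->buf, append the
	 * MD5/SHA-1 padding (a 0x80 bit after the last data byte, zero fill,
	 * then the total message length in bits), start the last round and
	 * copy the resulting digest to areq->result.
	 */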
hash_final:

	/* write the remaining whole words of the wait buffer */
	if (op->len) {
		nwait = op->len / 4;
		if (nwait) {
			writesl(ss->base + SS_RXFIFO, op->buf, nwait);
			op->byte_count += 4 * nwait;
		}

		/* keep the trailing bytes (less than a word) for the padding word */
		nbw = op->len - 4 * nwait;
		if (nbw) {
			wb = cpu_to_le32(*(u32 *)(op->buf + nwait * 4));
			wb &= GENMASK((nbw * 8) - 1, 0);

			op->byte_count += nbw;
		}
	}

	/* append the first padding bit (0x80) right after the last data byte */
	wb |= ((1 << 7) << (nbw * 8));
	bf[j++] = le32_to_cpu(wb);

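	/*
	 * Zero-pad so that, together with the 64-bit length below, the block
	 * ends exactly on a 64-byte boundary. min_fill reserves room for the
	 * two length words, plus the padding word itself when it carries no
	 * data bytes. bf[] was zeroed, so advancing j adds zero words.
	 */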
	fill = 64 - (op->byte_count % 64);
	min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));

	if (fill < min_fill)
		fill += 64;

	j += (fill - min_fill) / sizeof(u32);

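	/* write the total length in bits: big-endian for SHA-1, little-endian for MD5 */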
	if (op->mode == SS_OP_SHA1) {
		__be64 *bits = (__be64 *)&bf[j];
		*bits = cpu_to_be64(op->byte_count << 3);
		j += 2;
	} else {
		__le64 *bits = (__le64 *)&bf[j];
		*bits = cpu_to_le64(op->byte_count << 3);
		j += 2;
	}
	writesl(ss->base + SS_RXFIFO, bf, j);

	/* signal that this was the last data and start the final round */
	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);

	/* wait for the engine to clear SS_DATA_END, bounded by SS_TIMEOUT polls */
	i = 0;
	do {
		v = readl(ss->base + SS_CTL);
		i++;
	} while (i < SS_TIMEOUT && (v & SS_DATA_END));
	if (unlikely(i >= SS_TIMEOUT)) {
		dev_err_ratelimited(ss->dev,
				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
				    i, SS_TIMEOUT, v, areq->nbytes);
		err = -EIO;
		goto release_ss;
	}

	/* as above, give the engine a moment before reading the MD registers */
	ndelay(1);

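	/* copy the digest out: SHA-1 words are big-endian, MD5 words little-endian */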
	if (op->mode == SS_OP_SHA1) {
		for (i = 0; i < 5; i++) {
			v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
			memcpy(areq->result + i * 4, &v, 4);
		}
	} else {
		for (i = 0; i < 4; i++) {
			v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
			memcpy(areq->result + i * 4, &v, 4);
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_bh(&ss->slock);
	return err;
}

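/*
 * The exported ahash entry points only record the requested work in
 * op->flags; sun4i_hash() does the actual processing. digest() additionally
 * reinitializes the request context first.
 */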
int sun4i_hash_final(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	op->flags = SS_HASH_FINAL;
	return sun4i_hash(areq);
}

int sun4i_hash_update(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	op->flags = SS_HASH_UPDATE;
	return sun4i_hash(areq);
}

int sun4i_hash_finup(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
	return sun4i_hash(areq);
}

int sun4i_hash_digest(struct ahash_request *areq)
{
	int err;
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	err = sun4i_hash_init(areq);
	if (err)
		return err;

	op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
	return sun4i_hash(areq);
}