Lines matching references to ctx in the Atmel SHA driver (drivers/crypto/atmel-sha.c). Each entry gives the source line number, the line itself, and the function it appears in; parameter and local-variable declarations are marked "argument" and "local".
158 static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx) in atmel_sha_append_sg() argument
162 while ((ctx->bufcnt < ctx->buflen) && ctx->total) { in atmel_sha_append_sg()
163 count = min(ctx->sg->length - ctx->offset, ctx->total); in atmel_sha_append_sg()
164 count = min(count, ctx->buflen - ctx->bufcnt); in atmel_sha_append_sg()
173 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) { in atmel_sha_append_sg()
174 ctx->sg = sg_next(ctx->sg); in atmel_sha_append_sg()
181 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, in atmel_sha_append_sg()
182 ctx->offset, count, 0); in atmel_sha_append_sg()
184 ctx->bufcnt += count; in atmel_sha_append_sg()
185 ctx->offset += count; in atmel_sha_append_sg()
186 ctx->total -= count; in atmel_sha_append_sg()
188 if (ctx->offset == ctx->sg->length) { in atmel_sha_append_sg()
189 ctx->sg = sg_next(ctx->sg); in atmel_sha_append_sg()
190 if (ctx->sg) in atmel_sha_append_sg()
191 ctx->offset = 0; in atmel_sha_append_sg()
193 ctx->total = 0; in atmel_sha_append_sg()
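
The fragments above are the software buffering path: atmel_sha_append_sg() copies request data from the scatterlist into ctx->buffer until either the buffer is full or the request is exhausted, skipping empty entries and advancing ctx->offset and ctx->total as it goes. Below is a minimal user-space sketch of the same bookkeeping, with plain memory chunks standing in for scatterlist entries; the names and the free-standing form are illustrative, not the driver's code.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for one scatterlist entry. */
struct chunk {
    const uint8_t *data;
    size_t length;
};

/*
 * Append as much request data as fits into the staging buffer, mirroring
 * the min(entry remainder, buffer space, bytes left) logic of
 * atmel_sha_append_sg().  Returns the number of bytes copied.
 */
static size_t append_chunks(uint8_t *buf, size_t buflen, size_t *bufcnt,
                            const struct chunk *sg, size_t nents,
                            size_t *idx, size_t *offset, size_t *total)
{
    size_t copied = 0;

    while (*bufcnt < buflen && *total && *idx < nents) {
        size_t count = sg[*idx].length - *offset;

        if (count > *total)
            count = *total;
        if (count > buflen - *bufcnt)
            count = buflen - *bufcnt;

        memcpy(buf + *bufcnt, sg[*idx].data + *offset, count);

        *bufcnt += count;
        *offset += count;
        *total  -= count;
        copied  += count;

        if (*offset == sg[*idx].length) {   /* entry exhausted: advance */
            (*idx)++;
            *offset = 0;
        }
    }
    return copied;
}
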
216 static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length) in atmel_sha_fill_padding() argument
222 size[0] = ctx->digcnt[0]; in atmel_sha_fill_padding()
223 size[1] = ctx->digcnt[1]; in atmel_sha_fill_padding()
225 size[0] += ctx->bufcnt; in atmel_sha_fill_padding()
226 if (size[0] < ctx->bufcnt) in atmel_sha_fill_padding()
236 if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) { in atmel_sha_fill_padding()
237 index = ctx->bufcnt & 0x7f; in atmel_sha_fill_padding()
239 *(ctx->buffer + ctx->bufcnt) = 0x80; in atmel_sha_fill_padding()
240 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); in atmel_sha_fill_padding()
241 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); in atmel_sha_fill_padding()
242 ctx->bufcnt += padlen + 16; in atmel_sha_fill_padding()
243 ctx->flags |= SHA_FLAGS_PAD; in atmel_sha_fill_padding()
245 index = ctx->bufcnt & 0x3f; in atmel_sha_fill_padding()
247 *(ctx->buffer + ctx->bufcnt) = 0x80; in atmel_sha_fill_padding()
248 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); in atmel_sha_fill_padding()
249 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); in atmel_sha_fill_padding()
250 ctx->bufcnt += padlen + 8; in atmel_sha_fill_padding()
251 ctx->flags |= SHA_FLAGS_PAD; in atmel_sha_fill_padding()
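
The padding fragments implement standard MD-style padding: a single 0x80 byte, zero fill up to 56 bytes mod 64 (or 112 mod 128 for SHA-384/512), then the total message length in bits appended big-endian, built from digcnt[1]:digcnt[0] plus bufcnt. A self-contained sketch of the 64-byte-block branch, assuming a plain byte buffer with room for one extra block; the SHA-384/512 branch is the same idea with 128-byte blocks and a 128-bit length field.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Append SHA-1/SHA-224/SHA-256 style padding to a partially filled block
 * buffer.  msg_bytes is the total message length so far, buffered bytes
 * included.  Names are illustrative, not the driver's.
 */
static size_t sha256_pad(uint8_t *buf, size_t bufcnt, uint64_t msg_bytes)
{
    uint64_t bits = msg_bytes << 3;            /* message length in bits */
    size_t index = bufcnt & 0x3f;              /* position inside the block */
    size_t padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
    int i;

    buf[bufcnt] = 0x80;                        /* single "1" bit */
    memset(buf + bufcnt + 1, 0, padlen - 1);   /* zero fill */

    for (i = 0; i < 8; i++)                    /* big-endian 64-bit length */
        buf[bufcnt + padlen + i] = (uint8_t)(bits >> (56 - 8 * i));

    return bufcnt + padlen + 8;                /* new buffer count */
}
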
259 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); in atmel_sha_init() local
276 ctx->dd = dd; in atmel_sha_init()
278 ctx->flags = 0; in atmel_sha_init()
285 ctx->flags |= SHA_FLAGS_SHA1; in atmel_sha_init()
286 ctx->block_size = SHA1_BLOCK_SIZE; in atmel_sha_init()
289 ctx->flags |= SHA_FLAGS_SHA224; in atmel_sha_init()
290 ctx->block_size = SHA224_BLOCK_SIZE; in atmel_sha_init()
293 ctx->flags |= SHA_FLAGS_SHA256; in atmel_sha_init()
294 ctx->block_size = SHA256_BLOCK_SIZE; in atmel_sha_init()
297 ctx->flags |= SHA_FLAGS_SHA384; in atmel_sha_init()
298 ctx->block_size = SHA384_BLOCK_SIZE; in atmel_sha_init()
301 ctx->flags |= SHA_FLAGS_SHA512; in atmel_sha_init()
302 ctx->block_size = SHA512_BLOCK_SIZE; in atmel_sha_init()
309 ctx->bufcnt = 0; in atmel_sha_init()
310 ctx->digcnt[0] = 0; in atmel_sha_init()
311 ctx->digcnt[1] = 0; in atmel_sha_init()
312 ctx->buflen = SHA_BUFFER_LEN; in atmel_sha_init()
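
atmel_sha_init() picks the algorithm from the request's digest size, records it in ctx->flags (SHA_FLAGS_SHA1 ... SHA_FLAGS_SHA512), stores the matching block size, then clears bufcnt/digcnt and sets buflen to SHA_BUFFER_LEN. The block-size mapping visible in the fragments, restated as a small helper; the helper itself is hypothetical, the sizes match the kernel's crypto headers.

#include <stddef.h>

#define SHA1_DIGEST_SIZE    20
#define SHA224_DIGEST_SIZE  28
#define SHA256_DIGEST_SIZE  32
#define SHA384_DIGEST_SIZE  48
#define SHA512_DIGEST_SIZE  64

/*
 * Block size chosen by atmel_sha_init() for each supported digest size:
 * 64 bytes for SHA-1/224/256, 128 bytes for SHA-384/512.
 */
static size_t sha_block_size(size_t digest_size)
{
    switch (digest_size) {
    case SHA1_DIGEST_SIZE:
    case SHA224_DIGEST_SIZE:
    case SHA256_DIGEST_SIZE:
        return 64;
    case SHA384_DIGEST_SIZE:
    case SHA512_DIGEST_SIZE:
        return 128;
    default:
        return 0;       /* unsupported digest size */
    }
}
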
319 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_write_ctrl() local
332 if (ctx->flags & SHA_FLAGS_SHA1) in atmel_sha_write_ctrl()
334 else if (ctx->flags & SHA_FLAGS_SHA224) in atmel_sha_write_ctrl()
336 else if (ctx->flags & SHA_FLAGS_SHA256) in atmel_sha_write_ctrl()
338 else if (ctx->flags & SHA_FLAGS_SHA384) in atmel_sha_write_ctrl()
340 else if (ctx->flags & SHA_FLAGS_SHA512) in atmel_sha_write_ctrl()
344 if (!(ctx->digcnt[0] || ctx->digcnt[1])) in atmel_sha_write_ctrl()
354 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_xmit_cpu() local
359 ctx->digcnt[1], ctx->digcnt[0], length, final); in atmel_sha_xmit_cpu()
364 ctx->digcnt[0] += length; in atmel_sha_xmit_cpu()
365 if (ctx->digcnt[0] < length) in atmel_sha_xmit_cpu()
366 ctx->digcnt[1]++; in atmel_sha_xmit_cpu()
384 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_xmit_pdc() local
388 ctx->digcnt[1], ctx->digcnt[0], length1, final); in atmel_sha_xmit_pdc()
402 ctx->digcnt[0] += length1; in atmel_sha_xmit_pdc()
403 if (ctx->digcnt[0] < length1) in atmel_sha_xmit_pdc()
404 ctx->digcnt[1]++; in atmel_sha_xmit_pdc()
428 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_xmit_dma() local
433 ctx->digcnt[1], ctx->digcnt[0], length1, final); in atmel_sha_xmit_dma()
464 ctx->digcnt[0] += length1; in atmel_sha_xmit_dma()
465 if (ctx->digcnt[0] < length1) in atmel_sha_xmit_dma()
466 ctx->digcnt[1]++; in atmel_sha_xmit_dma()
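
All three transmit paths (CPU, PDC and DMA engine) account for processed bytes the same way: ctx->digcnt[] holds a 128-bit byte counter split across two 64-bit words, and a carry into the high word is detected by the low word wrapping below the addend. The idiom in isolation:

#include <stdint.h>

/* 128-bit byte counter kept as two 64-bit halves, as in ctx->digcnt[]. */
struct count128 {
    uint64_t lo;
    uint64_t hi;
};

static void count128_add(struct count128 *c, uint64_t length)
{
    c->lo += length;
    if (c->lo < length)     /* unsigned wraparound => carry into the high word */
        c->hi++;
}
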
493 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_cpu() local
496 atmel_sha_append_sg(ctx); in atmel_sha_update_cpu()
497 atmel_sha_fill_padding(ctx, 0); in atmel_sha_update_cpu()
498 bufcnt = ctx->bufcnt; in atmel_sha_update_cpu()
499 ctx->bufcnt = 0; in atmel_sha_update_cpu()
501 return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1); in atmel_sha_update_cpu()
505 struct atmel_sha_reqctx *ctx, in atmel_sha_xmit_dma_map() argument
508 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, in atmel_sha_xmit_dma_map()
509 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); in atmel_sha_xmit_dma_map()
510 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { in atmel_sha_xmit_dma_map()
511 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen + in atmel_sha_xmit_dma_map()
512 ctx->block_size); in atmel_sha_xmit_dma_map()
516 ctx->flags &= ~SHA_FLAGS_SG; in atmel_sha_xmit_dma_map()
519 return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final); in atmel_sha_xmit_dma_map()
524 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_dma_slow() local
528 atmel_sha_append_sg(ctx); in atmel_sha_update_dma_slow()
530 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; in atmel_sha_update_dma_slow()
533 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final); in atmel_sha_update_dma_slow()
536 atmel_sha_fill_padding(ctx, 0); in atmel_sha_update_dma_slow()
538 if (final || (ctx->bufcnt == ctx->buflen)) { in atmel_sha_update_dma_slow()
539 count = ctx->bufcnt; in atmel_sha_update_dma_slow()
540 ctx->bufcnt = 0; in atmel_sha_update_dma_slow()
541 return atmel_sha_xmit_dma_map(dd, ctx, count, final); in atmel_sha_update_dma_slow()
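
The slow DMA path buffers data first via atmel_sha_append_sg(), pads if this is the final piece of a finup request, and only fires a DMA transfer of ctx->buffer when the data is final or the buffer is full. That flush condition in isolation (a sketch, not the driver's function):

#include <stdbool.h>
#include <stddef.h>

/* Flush condition used by atmel_sha_update_dma_slow(): transfer the
 * staging buffer when the request is complete or the buffer is full. */
static bool should_flush(bool finup, size_t total, size_t bufcnt, size_t buflen)
{
    bool final = finup && total == 0;   /* final piece of a finup request */

    return final || bufcnt == buflen;
}
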
549 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_dma_start() local
554 if (!ctx->total) in atmel_sha_update_dma_start()
557 if (ctx->bufcnt || ctx->offset) in atmel_sha_update_dma_start()
561 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total); in atmel_sha_update_dma_start()
563 sg = ctx->sg; in atmel_sha_update_dma_start()
568 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size)) in atmel_sha_update_dma_start()
572 length = min(ctx->total, sg->length); in atmel_sha_update_dma_start()
575 if (!(ctx->flags & SHA_FLAGS_FINUP)) { in atmel_sha_update_dma_start()
577 tail = length & (ctx->block_size - 1); in atmel_sha_update_dma_start()
582 ctx->total -= length; in atmel_sha_update_dma_start()
583 ctx->offset = length; /* offset where to start slow */ in atmel_sha_update_dma_start()
585 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; in atmel_sha_update_dma_start()
589 tail = length & (ctx->block_size - 1); in atmel_sha_update_dma_start()
591 ctx->total += tail; in atmel_sha_update_dma_start()
592 ctx->offset = length; /* offset where to start slow */ in atmel_sha_update_dma_start()
594 sg = ctx->sg; in atmel_sha_update_dma_start()
595 atmel_sha_append_sg(ctx); in atmel_sha_update_dma_start()
597 atmel_sha_fill_padding(ctx, length); in atmel_sha_update_dma_start()
599 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, in atmel_sha_update_dma_start()
600 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); in atmel_sha_update_dma_start()
601 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { in atmel_sha_update_dma_start()
603 ctx->buflen + ctx->block_size); in atmel_sha_update_dma_start()
608 ctx->flags &= ~SHA_FLAGS_SG; in atmel_sha_update_dma_start()
609 count = ctx->bufcnt; in atmel_sha_update_dma_start()
610 ctx->bufcnt = 0; in atmel_sha_update_dma_start()
611 return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0, in atmel_sha_update_dma_start()
614 ctx->sg = sg; in atmel_sha_update_dma_start()
615 if (!dma_map_sg(dd->dev, ctx->sg, 1, in atmel_sha_update_dma_start()
621 ctx->flags |= SHA_FLAGS_SG; in atmel_sha_update_dma_start()
623 count = ctx->bufcnt; in atmel_sha_update_dma_start()
624 ctx->bufcnt = 0; in atmel_sha_update_dma_start()
625 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), in atmel_sha_update_dma_start()
626 length, ctx->dma_addr, count, final); in atmel_sha_update_dma_start()
630 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { in atmel_sha_update_dma_start()
635 ctx->flags |= SHA_FLAGS_SG; in atmel_sha_update_dma_start()
638 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0, in atmel_sha_update_dma_start()
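
atmel_sha_update_dma_start() is the fast path: when nothing is buffered it tries to DMA straight from the scatterlist, falling back to the buffered slow path for short data or entries that are not block-aligned. Unless the request is a finup that ends here, only whole blocks are submitted and the unaligned tail is left for later, with ctx->offset recording where the slow path resumes. A hedged restatement of that head/tail split; the names and the free-standing signature are illustrative.

#include <stdbool.h>
#include <stddef.h>

/*
 * Decide how much of a scatterlist entry can be handed to DMA directly.
 * Mirrors the tail/offset bookkeeping in atmel_sha_update_dma_start();
 * block_size is a power of two (64 or 128 here).
 */
static size_t plan_dma(size_t length, size_t block_size,
                       bool finup, size_t *remaining, size_t *offset,
                       bool *final)
{
    if (!finup) {
        /* not the last data: keep the unaligned tail for the slow path */
        size_t tail = length & (block_size - 1);
        length -= tail;
    }

    *remaining -= length;       /* bytes still owed after this transfer */
    *offset = length;           /* where the slow path resumes in this entry */
    *final = finup && (*remaining == 0);

    return length;              /* bytes to submit to DMA now */
}
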
644 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_dma_stop() local
646 if (ctx->flags & SHA_FLAGS_SG) { in atmel_sha_update_dma_stop()
647 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); in atmel_sha_update_dma_stop()
648 if (ctx->sg->length == ctx->offset) { in atmel_sha_update_dma_stop()
649 ctx->sg = sg_next(ctx->sg); in atmel_sha_update_dma_stop()
650 if (ctx->sg) in atmel_sha_update_dma_stop()
651 ctx->offset = 0; in atmel_sha_update_dma_stop()
653 if (ctx->flags & SHA_FLAGS_PAD) { in atmel_sha_update_dma_stop()
654 dma_unmap_single(dd->dev, ctx->dma_addr, in atmel_sha_update_dma_stop()
655 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); in atmel_sha_update_dma_stop()
658 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + in atmel_sha_update_dma_stop()
659 ctx->block_size, DMA_TO_DEVICE); in atmel_sha_update_dma_stop()
668 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); in atmel_sha_update_req() local
672 ctx->total, ctx->digcnt[1], ctx->digcnt[0]); in atmel_sha_update_req()
674 if (ctx->flags & SHA_FLAGS_CPU) in atmel_sha_update_req()
681 err, ctx->digcnt[1], ctx->digcnt[0]); in atmel_sha_update_req()
689 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); in atmel_sha_final_req() local
693 if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) { in atmel_sha_final_req()
694 atmel_sha_fill_padding(ctx, 0); in atmel_sha_final_req()
695 count = ctx->bufcnt; in atmel_sha_final_req()
696 ctx->bufcnt = 0; in atmel_sha_final_req()
697 err = atmel_sha_xmit_dma_map(dd, ctx, count, 1); in atmel_sha_final_req()
701 atmel_sha_fill_padding(ctx, 0); in atmel_sha_final_req()
702 count = ctx->bufcnt; in atmel_sha_final_req()
703 ctx->bufcnt = 0; in atmel_sha_final_req()
704 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1); in atmel_sha_final_req()
714 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); in atmel_sha_copy_hash() local
715 u32 *hash = (u32 *)ctx->digest; in atmel_sha_copy_hash()
718 if (ctx->flags & SHA_FLAGS_SHA1) in atmel_sha_copy_hash()
720 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); in atmel_sha_copy_hash()
721 else if (ctx->flags & SHA_FLAGS_SHA224) in atmel_sha_copy_hash()
723 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); in atmel_sha_copy_hash()
724 else if (ctx->flags & SHA_FLAGS_SHA256) in atmel_sha_copy_hash()
726 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); in atmel_sha_copy_hash()
727 else if (ctx->flags & SHA_FLAGS_SHA384) in atmel_sha_copy_hash()
729 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); in atmel_sha_copy_hash()
732 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); in atmel_sha_copy_hash()
737 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); in atmel_sha_copy_ready_hash() local
742 if (ctx->flags & SHA_FLAGS_SHA1) in atmel_sha_copy_ready_hash()
743 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
744 else if (ctx->flags & SHA_FLAGS_SHA224) in atmel_sha_copy_ready_hash()
745 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
746 else if (ctx->flags & SHA_FLAGS_SHA256) in atmel_sha_copy_ready_hash()
747 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
748 else if (ctx->flags & SHA_FLAGS_SHA384) in atmel_sha_copy_ready_hash()
749 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
751 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
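
atmel_sha_copy_hash() drains the hardware digest registers into ctx->digest as 32-bit words (5 for SHA-1, 7 for SHA-224, 8 for SHA-256, 12 for SHA-384, 16 for SHA-512), and atmel_sha_copy_ready_hash() later copies the corresponding number of bytes into req->result. A sketch of the read-back loop with the register accessor mocked out; the real driver calls atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)).

#include <stdint.h>

/* Mocked register read standing in for atmel_sha_read(dd, SHA_REG_DIGEST(i)). */
static uint32_t fake_read_digest_reg(unsigned int i)
{
    return 0xdeadbeefu + i;     /* placeholder value for illustration */
}

/* Copy digest_size bytes of hardware state into a word-aligned buffer,
 * one 32-bit register per iteration, as atmel_sha_copy_hash() does. */
static void copy_hash(uint32_t *digest, unsigned int digest_size)
{
    unsigned int i;

    for (i = 0; i < digest_size / 4; i++)
        digest[i] = fake_read_digest_reg(i);
}
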
756 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); in atmel_sha_finish() local
757 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_finish()
760 if (ctx->digcnt[0] || ctx->digcnt[1]) in atmel_sha_finish()
763 dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1], in atmel_sha_finish()
764 ctx->digcnt[0], ctx->bufcnt); in atmel_sha_finish()
771 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); in atmel_sha_finish_req() local
772 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_finish_req()
779 ctx->flags |= SHA_FLAGS_ERROR; in atmel_sha_finish_req()
833 struct atmel_sha_reqctx *ctx; in atmel_sha_handle_queue() local
861 ctx = ahash_request_ctx(req); in atmel_sha_handle_queue()
864 ctx->op, req->nbytes); in atmel_sha_handle_queue()
871 if (ctx->op == SHA_OP_UPDATE) { in atmel_sha_handle_queue()
873 if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) in atmel_sha_handle_queue()
876 } else if (ctx->op == SHA_OP_FINAL) { in atmel_sha_handle_queue()
892 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); in atmel_sha_enqueue() local
896 ctx->op = op; in atmel_sha_enqueue()
903 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); in atmel_sha_update() local
908 ctx->total = req->nbytes; in atmel_sha_update()
909 ctx->sg = req->src; in atmel_sha_update()
910 ctx->offset = 0; in atmel_sha_update()
912 if (ctx->flags & SHA_FLAGS_FINUP) { in atmel_sha_update()
913 if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD) in atmel_sha_update()
915 ctx->flags |= SHA_FLAGS_CPU; in atmel_sha_update()
916 } else if (ctx->bufcnt + ctx->total < ctx->buflen) { in atmel_sha_update()
917 atmel_sha_append_sg(ctx); in atmel_sha_update()
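
atmel_sha_update() records the new scatterlist in the context and then decides how to process it: a finup request smaller than ATMEL_SHA_DMA_THRESHOLD is marked for CPU processing, a non-final update that still fits in ctx->buffer is merely appended and returns immediately, and everything else is enqueued for the hardware. A compact restatement of that decision; the enum and helper are hypothetical.

#include <stdbool.h>
#include <stddef.h>

enum sha_path {
    SHA_PATH_QUEUE_CPU,     /* enqueue, but let the CPU do the hashing */
    SHA_PATH_BUFFER_ONLY,   /* just append to ctx->buffer and return */
    SHA_PATH_QUEUE_DMA,     /* enqueue for hardware/DMA processing */
};

/*
 * Dispatch decision made by atmel_sha_update(): dma_threshold stands in
 * for ATMEL_SHA_DMA_THRESHOLD, buflen for ctx->buflen.
 */
static enum sha_path choose_path(size_t bufcnt, size_t nbytes, size_t buflen,
                                 size_t dma_threshold, bool finup)
{
    if (finup && bufcnt + nbytes < dma_threshold)
        return SHA_PATH_QUEUE_CPU;
    if (!finup && bufcnt + nbytes < buflen)
        return SHA_PATH_BUFFER_ONLY;
    return SHA_PATH_QUEUE_DMA;
}
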
925 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); in atmel_sha_final() local
931 ctx->flags |= SHA_FLAGS_FINUP; in atmel_sha_final()
933 if (ctx->flags & SHA_FLAGS_ERROR) in atmel_sha_final()
936 if (ctx->bufcnt) { in atmel_sha_final()
938 } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */ in atmel_sha_final()
960 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); in atmel_sha_finup() local
963 ctx->flags |= SHA_FLAGS_FINUP; in atmel_sha_finup()