Lines matching refs:x0

66 #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \
79 vpshufb t4, x0, x0; \
90 filter_8bit(x0, t5, t6, t7, t4); \
92 vextracti128 $1, x0, t0##_x; \
108 vaesenclast t4##_x, x0##_x, x0##_x; \
110 vinserti128 $1, t0##_x, x0, x0; \
140 filter_8bit(x0, t0, t1, t7, t6); \
164 vpxor x5, x0, x0; \
177 vpxor x0, x6, x6; \
184 vpxor x7, x0, x0; \
190 vpxor x0, x5, x5; \
203 vpxor t7, x0, x0; \
204 vpxor 4 * 32(mem_cd), x0, x0; \
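
The roundsm32 lines above route x0 through vaesenclast (lines 92, 108, 110), which, without VAES, exists only in a 128-bit form: the high lane is pulled out with vextracti128, both halves go through AESENCLAST, and the result is recombined with vinserti128. The filter_8bit calls around it (lines 90, 140) apparently apply affine corrections so the AES S-box hardware can stand in for the cipher's own S-box. A minimal C sketch of the split-and-recombine pattern, assuming AVX2 and AES-NI intrinsics (aesenclast_256 is an illustrative name; the zero round key is an assumption about what t4 holds, and the affine filters are omitted):

    #include <immintrin.h>

    /* Apply the 128-bit-only AESENCLAST to a 256-bit value by halves,
     * mirroring the vextracti128 / vaesenclast / vinserti128 sequence.
     * A zero round key reduces AESENCLAST to ShiftRows + SubBytes,
     * which is how the AES S-box circuitry gets reused (assumption).
     * Build with -mavx2 -maes. */
    static __m256i aesenclast_256(__m256i x)
    {
        const __m128i zero = _mm_setzero_si128();
        __m128i lo = _mm256_castsi256_si128(x);      /* low 128 bits  */
        __m128i hi = _mm256_extracti128_si256(x, 1); /* vextracti128  */

        lo = _mm_aesenclast_si128(lo, zero);
        hi = _mm_aesenclast_si128(hi, zero);

        return _mm256_inserti128_si256(_mm256_castsi128_si256(lo),
                                       hi, 1);       /* vinserti128   */
    }
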
250 #define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
255 vmovdqu x0, 4 * 32(mem_cd); \
267 store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab);
269 #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */
271 #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \
277 vmovdqu x0, 0 * 32(mem_ab); \
282 #define enc_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
284 two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
286 two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
288 two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
291 #define dec_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
293 two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
295 two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
297 two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
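
two_roundsm32 appears to take the name of a store macro as a parameter and expands it at the end of the pair (line 267 invokes store_ab(...)), so the same round body can either spill the state (store_ab_state, line 271) or emit nothing (dummy_store, line 269); enc_rounds32 and dec_rounds32 then just chain three such pairs. A small C-preprocessor sketch of that macro-as-callback pattern (all names illustrative):

    #include <stdint.h>

    /* Stand-in for one cipher round; the real one is roundsm32 above. */
    static uint32_t round_fn(uint32_t s) { return s * 2654435761u + 1u; }

    #define STORE_STATE(s, mem) ((mem)[0] = (s))  /* like store_ab_state */
    #define DUMMY_STORE(s, mem)                   /* like dummy_store: nothing */

    /* The store macro's *name* is a parameter, expanded after the rounds. */
    #define TWO_ROUNDS(s, mem, store_cb) \
        do { (s) = round_fn(round_fn(s)); store_cb((s), (mem)); } while (0)

Usage: TWO_ROUNDS(state, mem, STORE_STATE) runs the pair and spills; TWO_ROUNDS(state, mem, DUMMY_STORE) runs the pair alone, with the empty expansion costing nothing at run time.
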
457 #define transpose_4x4(x0, x1, x2, x3, t1, t2) \
458 vpunpckhdq x1, x0, t2; \
459 vpunpckldq x1, x0, x0; \
464 vpunpckhqdq t1, x0, x1; \
465 vpunpcklqdq t1, x0, x0; \
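
transpose_4x4 is the classic 4x4 transpose of 32-bit elements: interleave dwords, then interleave qwords (lines 460-463 are absent from this match list, presumably because they touch only x2, x3, and the temporaries). An equivalent C intrinsics sketch on 128-bit vectors; the macro does the same thing independently in each 128-bit lane of a YMM register:

    #include <immintrin.h>

    /* 4x4 transpose of 32-bit elements: rows (a,b,c,d) in, columns out. */
    static void transpose_4x4_epi32(__m128i *r0, __m128i *r1,
                                    __m128i *r2, __m128i *r3)
    {
        __m128i t0 = _mm_unpacklo_epi32(*r0, *r1); /* a0 b0 a1 b1 */
        __m128i t1 = _mm_unpackhi_epi32(*r0, *r1); /* a2 b2 a3 b3 */
        __m128i t2 = _mm_unpacklo_epi32(*r2, *r3); /* c0 d0 c1 d1 */
        __m128i t3 = _mm_unpackhi_epi32(*r2, *r3); /* c2 d2 c3 d3 */

        *r0 = _mm_unpacklo_epi64(t0, t2);          /* a0 b0 c0 d0 */
        *r1 = _mm_unpackhi_epi64(t0, t2);          /* a1 b1 c1 d1 */
        *r2 = _mm_unpacklo_epi64(t1, t3);          /* a2 b2 c2 d2 */
        *r3 = _mm_unpackhi_epi64(t1, t3);          /* a3 b3 c3 d3 */
    }
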
520 #define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
522 vpbroadcastq key, x0; \
523 vpshufb .Lpack_bswap, x0, x0; \
525 vpxor 0 * 32(rio), x0, y7; \
526 vpxor 1 * 32(rio), x0, y6; \
527 vpxor 2 * 32(rio), x0, y5; \
528 vpxor 3 * 32(rio), x0, y4; \
529 vpxor 4 * 32(rio), x0, y3; \
530 vpxor 5 * 32(rio), x0, y2; \
531 vpxor 6 * 32(rio), x0, y1; \
532 vpxor 7 * 32(rio), x0, y0; \
533 vpxor 8 * 32(rio), x0, x7; \
534 vpxor 9 * 32(rio), x0, x6; \
535 vpxor 10 * 32(rio), x0, x5; \
536 vpxor 11 * 32(rio), x0, x4; \
537 vpxor 12 * 32(rio), x0, x3; \
538 vpxor 13 * 32(rio), x0, x2; \
539 vpxor 14 * 32(rio), x0, x1; \
540 vpxor 15 * 32(rio), x0, x0;
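
inpack32_pre broadcasts one 64-bit subkey across x0 (vpbroadcastq, line 522), byte-shuffles it into packing order (.Lpack_bswap, line 523), and XORs it into all sixteen 32-byte blocks loaded from rio. A hedged C sketch of the broadcast-and-XOR pre-whitening (prewhiten16 is an illustrative name; the pshufb reordering is omitted, and the asm's final XOR at line 540 overwrites x0 itself rather than writing to a separate array):

    #include <immintrin.h>
    #include <stdint.h>

    static void prewhiten16(__m256i dst[16], const uint8_t *in, uint64_t key)
    {
        __m256i k = _mm256_set1_epi64x((int64_t)key); /* vpbroadcastq key, x0 */

        for (int i = 0; i < 16; i++) {                /* vpxor i*32(rio), x0, ... */
            __m256i blk =
                _mm256_loadu_si256((const __m256i *)(in + 32 * i));
            dst[i] = _mm256_xor_si256(blk, k);
        }
    }
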
543 #define inpack32_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
545 byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \
548 vmovdqu x0, 0 * 32(mem_ab); \
566 #define outunpack32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \
568 byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \
571 vmovdqu x0, stack_tmp0; \
573 vpbroadcastq key, x0; \
574 vpshufb .Lpack_bswap, x0, x0; \
576 vpxor x0, y7, y7; \
577 vpxor x0, y6, y6; \
578 vpxor x0, y5, y5; \
579 vpxor x0, y4, y4; \
580 vpxor x0, y3, y3; \
581 vpxor x0, y2, y2; \
582 vpxor x0, y1, y1; \
583 vpxor x0, y0, y0; \
584 vpxor x0, x7, x7; \
585 vpxor x0, x6, x6; \
586 vpxor x0, x5, x5; \
587 vpxor x0, x4, x4; \
588 vpxor x0, x3, x3; \
589 vpxor x0, x2, x2; \
590 vpxor x0, x1, x1; \
591 vpxor stack_tmp0, x0, x0;
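
outunpack32 has all sixteen YMM registers live, so it spills x0 to a stack slot (line 571), reuses x0 to hold the broadcast whitening key (lines 573-574), XORs that key into the other fifteen registers (lines 576-590), and finally folds the spilled value back in (line 591 computes key ^ old x0). A C sketch of the spill-and-restore trick (postwhiten16 is an illustrative name; the byteslice and pshufb steps are omitted):

    #include <immintrin.h>
    #include <stdint.h>

    static void postwhiten16(__m256i x[16], uint64_t key)
    {
        __m256i spill = x[0];                     /* vmovdqu x0, stack_tmp0 */
        x[0] = _mm256_set1_epi64x((int64_t)key);  /* vpbroadcastq key, x0   */

        for (int i = 1; i < 16; i++)              /* vpxor x0, xi, xi       */
            x[i] = _mm256_xor_si256(x[0], x[i]);

        x[0] = _mm256_xor_si256(spill, x[0]);     /* vpxor stack_tmp0, x0, x0 */
    }
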
593 #define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
595 vmovdqu x0, 0 * 32(rio); \