Lines matching refs: TMP3

163 .macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
166 pshufd $78, \HK, \TMP3
168 pxor \HK, \TMP3 # TMP3 = b1+b0
171 PCLMULQDQ 0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0)
174 movdqa \TMP2, \TMP3
175 pslldq $8, \TMP3 # left shift TMP3 2 DWs
177 pxor \TMP3, \GH
183 movdqa \GH, \TMP3
184 movdqa \GH, \TMP4 # copy GH into TMP2,TMP3 and TMP4
188 pslld $30, \TMP3 # packed left shift <<30
190 pxor \TMP3, \TMP2 # xor the shifted versions
199 movdqa \GH,\TMP2 # copy GH into TMP2,TMP3 and TMP4
202 movdqa \GH,\TMP3
205 psrld $2,\TMP3 # packed right shift >>2
207 pxor \TMP3,\TMP2 # xor the shifted versions
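
The GHASH_MUL matches above are the Karatsuba carry-less multiply: pshufd $78 swaps the 64-bit halves of HK, the pxor forms b1+b0, one extra PCLMULQDQ supplies the middle product, and pslldq $8 folds that middle term into the low half of the 256-bit result; the later pslld $30 / psrld $2 shifts (together with the $31/$25 and $1/$7 shifts on the other temporaries) reduce the product modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1 in its bit-reflected layout. Below is a minimal C-intrinsics sketch of the multiply step only, with the reduction omitted; the function name and the lo/hi output convention are illustrative, not the kernel's.

    #include <emmintrin.h>   /* SSE2 */
    #include <wmmintrin.h>   /* PCLMULQDQ */

    /* Karatsuba carry-less 128x128 -> 256-bit multiply: three PCLMULQDQ
     * instead of four, mirroring GHASH_MUL lines 163-177 (a = GH, b = HK). */
    static void clmul_karatsuba(__m128i a, __m128i b, __m128i *lo, __m128i *hi)
    {
            __m128i t0 = _mm_clmulepi64_si128(a, b, 0x00);           /* a0*b0 */
            __m128i t1 = _mm_clmulepi64_si128(a, b, 0x11);           /* a1*b1 */
            __m128i am = _mm_xor_si128(a, _mm_shuffle_epi32(a, 78)); /* a1^a0, cf. pshufd $78 */
            __m128i bm = _mm_xor_si128(b, _mm_shuffle_epi32(b, 78)); /* b1^b0 */
            __m128i tm = _mm_clmulepi64_si128(am, bm, 0x00);         /* (a1^a0)*(b1^b0) */

            tm  = _mm_xor_si128(tm, _mm_xor_si128(t0, t1));          /* middle term */
            *lo = _mm_xor_si128(t0, _mm_slli_si128(tm, 8));          /* cf. pslldq $8 */
            *hi = _mm_xor_si128(t1, _mm_srli_si128(tm, 8));
    }
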
226 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
303 GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
308 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
310 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
312 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
315 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
317 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
320 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
352 movdqa \TMP3, \TMP5
353 pshufd $78, \TMP3, \TMP1
354 pxor \TMP3, \TMP1
356 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
370 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
383 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
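
In INITIAL_BLOCKS_DEC, GHASH_MUL is reused twice over: to hash the first one to three blocks (lines 303-320) and, at lines 352-383, to multiply the hash key by itself repeatedly so that its powers H^2, H^3, H^4 are available; those powers are what let the 4-way parallel macros below fold four blocks per reduction. With X_i the GHASH accumulator, C_i the i-th hashed block, and H the hash key (standard GHASH notation, not the kernel's register names), the identities are:

    X_i = (X_{i-1} \oplus C_i) \cdot H

    X_{i+4} = (X_i \oplus C_{i+1}) \cdot H^4 \oplus C_{i+2} \cdot H^3
              \oplus C_{i+3} \cdot H^2 \oplus C_{i+4} \cdot H
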
451 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
524 GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
529 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
531 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
533 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
536 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
538 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
541 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
573 movdqa \TMP3, \TMP5
574 pshufd $78, \TMP3, \TMP1
575 pxor \TMP3, \TMP1
577 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
591 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
604 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
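
INITIAL_BLOCKS_ENC repeats the same pattern for the encrypt path. The pshufd $78 / pxor pair at lines 573-575 (and 352-354 above) additionally precomputes the xor of the hash key's 64-bit halves, and presumably the same is done for each power of H in lines not matched here, so that the parallel macros need only one PCLMULQDQ for the Karatsuba middle term per block. A one-line sketch of that precomputation (hypothetical helper name):

    #include <emmintrin.h>

    /* k = hi64(Hpow) ^ lo64(Hpow), computed once per hash-key power;
     * cf. pshufd $78 / pxor at lines 352-354 and 573-575. */
    static __m128i karatsuba_half(__m128i hpow)
    {
            return _mm_xor_si128(hpow, _mm_shuffle_epi32(hpow, 78));
    }
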
662 .macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
713 movaps 0x30(%arg1), \TMP3
714 AESENC \TMP3, \XMM1 # Round 3
715 AESENC \TMP3, \XMM2
716 AESENC \TMP3, \XMM3
717 AESENC \TMP3, \XMM4
719 movaps 0x40(%arg1), \TMP3
720 AESENC \TMP3, \XMM1 # Round 4
721 AESENC \TMP3, \XMM2
722 AESENC \TMP3, \XMM3
723 AESENC \TMP3, \XMM4
726 movaps 0x50(%arg1), \TMP3
727 AESENC \TMP3, \XMM1 # Round 5
728 AESENC \TMP3, \XMM2
729 AESENC \TMP3, \XMM3
730 AESENC \TMP3, \XMM4
743 movaps 0x60(%arg1), \TMP3
744 AESENC \TMP3, \XMM1 # Round 6
745 AESENC \TMP3, \XMM2
746 AESENC \TMP3, \XMM3
747 AESENC \TMP3, \XMM4
749 movaps 0x70(%arg1), \TMP3
750 AESENC \TMP3, \XMM1 # Round 7
751 AESENC \TMP3, \XMM2
752 AESENC \TMP3, \XMM3
753 AESENC \TMP3, \XMM4
756 movaps 0x80(%arg1), \TMP3
757 AESENC \TMP3, \XMM1 # Round 8
758 AESENC \TMP3, \XMM2
759 AESENC \TMP3, \XMM3
760 AESENC \TMP3, \XMM4
774 movaps 0x90(%arg1), \TMP3
775 AESENC \TMP3, \XMM1 # Round 9
776 AESENC \TMP3, \XMM2
777 AESENC \TMP3, \XMM3
778 AESENC \TMP3, \XMM4
787 MOVADQ (%r10),\TMP3
789 AESENC \TMP3, %xmm\index
796 MOVADQ (%r10), \TMP3
797 AESENCLAST \TMP3, \XMM1 # Round 10
798 AESENCLAST \TMP3, \XMM2
799 AESENCLAST \TMP3, \XMM3
800 AESENCLAST \TMP3, \XMM4
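
The movaps 0x30(%arg1) through 0x90(%arg1) loads walk the expanded key schedule at 16-byte strides, feeding one round key to all four counter blocks per round; the MOVADQ (%r10) loop at 787-789 appears to cover the extra rounds of 192/256-bit keys, and AESENCLAST applies the final round key. For a single block and an AES-128 schedule, the per-block structure is the usual AES-NI pattern (helper name illustrative):

    #include <emmintrin.h>
    #include <wmmintrin.h>   /* AES-NI */

    /* Encrypt one block with an already-expanded AES-128 key schedule
     * (11 round keys of 16 bytes, laid out as the asm reads from %arg1). */
    static __m128i aes128_encrypt_block(__m128i block, const __m128i rk[11])
    {
            block = _mm_xor_si128(block, rk[0]);            /* initial whitening */
            for (int r = 1; r < 10; r++)
                    block = _mm_aesenc_si128(block, rk[r]); /* rounds 1-9 */
            return _mm_aesenclast_si128(block, rk[10]);     /* round 10 */
    }

The macro interleaves four such streams (XMM1-XMM4), which helps hide AESENC latency.
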
803 movdqu (%arg3,%r11,1), \TMP3
804 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
805 movdqu 16(%arg3,%r11,1), \TMP3
806 pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK
807 movdqu 32(%arg3,%r11,1), \TMP3
808 pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK
809 movdqu 48(%arg3,%r11,1), \TMP3
810 pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
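
GCM runs AES in counter mode, so the pxor with the just-loaded input is the entire data transformation: with Y_i the i-th counter block, the same xor against E_K(Y_i) both encrypts and decrypts.

    C_i = P_i \oplus E_K(Y_i), \qquad P_i = C_i \oplus E_K(Y_i)
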
825 movdqa \TMP2, \TMP3
826 pslldq $8, \TMP3 # left shift TMP3 2 DWs
828 pxor \TMP3, \XMM5
834 movdqa \XMM5, \TMP3
836 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
838 pslld $30, \TMP3 # packed left shift << 30
840 pxor \TMP3, \TMP2 # xor the shifted versions
849 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
850 movdqa \XMM5,\TMP3
853 psrld $2, \TMP3 # packed right shift >>2
855 pxor \TMP3,\TMP2 # xor the shifted versions
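
The shift/xor sequence at 825-855 is the modular reduction of the 256-bit Karatsuba product. In the bit-reflected representation the defining identity over GF(2) is

    x^{128} \equiv x^7 + x^2 + x + 1 \pmod{x^{128} + x^7 + x^2 + x + 1}

and the shift counts come in 32-complement pairs (31/1, 30/2, 25/7) matching the x, x^2 and x^7 terms across the 32-bit lanes, which is why the pslld $30 here is paired with a psrld $2 further down.
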
870 .macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
921 movaps 0x30(%arg1), \TMP3
922 AESENC \TMP3, \XMM1 # Round 3
923 AESENC \TMP3, \XMM2
924 AESENC \TMP3, \XMM3
925 AESENC \TMP3, \XMM4
927 movaps 0x40(%arg1), \TMP3
928 AESENC \TMP3, \XMM1 # Round 4
929 AESENC \TMP3, \XMM2
930 AESENC \TMP3, \XMM3
931 AESENC \TMP3, \XMM4
934 movaps 0x50(%arg1), \TMP3
935 AESENC \TMP3, \XMM1 # Round 5
936 AESENC \TMP3, \XMM2
937 AESENC \TMP3, \XMM3
938 AESENC \TMP3, \XMM4
951 movaps 0x60(%arg1), \TMP3
952 AESENC \TMP3, \XMM1 # Round 6
953 AESENC \TMP3, \XMM2
954 AESENC \TMP3, \XMM3
955 AESENC \TMP3, \XMM4
957 movaps 0x70(%arg1), \TMP3
958 AESENC \TMP3, \XMM1 # Round 7
959 AESENC \TMP3, \XMM2
960 AESENC \TMP3, \XMM3
961 AESENC \TMP3, \XMM4
964 movaps 0x80(%arg1), \TMP3
965 AESENC \TMP3, \XMM1 # Round 8
966 AESENC \TMP3, \XMM2
967 AESENC \TMP3, \XMM3
968 AESENC \TMP3, \XMM4
982 movaps 0x90(%arg1), \TMP3
983 AESENC \TMP3, \XMM1 # Round 9
984 AESENC \TMP3, \XMM2
985 AESENC \TMP3, \XMM3
986 AESENC \TMP3, \XMM4
995 MOVADQ (%r10),\TMP3
997 AESENC \TMP3, %xmm\index
1004 MOVADQ (%r10), \TMP3
1005 AESENCLAST \TMP3, \XMM1 # last round
1006 AESENCLAST \TMP3, \XMM2
1007 AESENCLAST \TMP3, \XMM3
1008 AESENCLAST \TMP3, \XMM4
1011 movdqu (%arg3,%r11,1), \TMP3
1012 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
1014 movdqa \TMP3, \XMM1
1015 movdqu 16(%arg3,%r11,1), \TMP3
1016 pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK
1018 movdqa \TMP3, \XMM2
1019 movdqu 32(%arg3,%r11,1), \TMP3
1020 pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK
1022 movdqa \TMP3, \XMM3
1023 movdqu 48(%arg3,%r11,1), \TMP3
1024 pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
1026 movdqa \TMP3, \XMM4
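
The difference from the ENC variant at lines 803-810 is the extra movdqa \TMP3, \XMMn after each pxor: GHASH is defined over the ciphertext, so the decrypt path keeps the loaded ciphertext for the next GHASH round while the xor result (the plaintext) is what gets stored. A sketch of that ordering for one block, in the serial GHASH form (the real macro uses the precomputed powers of H); ghash_mul() is a hypothetical stand-in for GHASH_MUL:

    #include <emmintrin.h>

    /* Decrypt-side handling of one 16-byte block: output the plaintext,
     * hash the ciphertext, return the updated GHASH accumulator. */
    static __m128i dec_block(const unsigned char *in, unsigned char *out,
                             __m128i keystream, __m128i acc, __m128i h,
                             __m128i (*ghash_mul)(__m128i, __m128i))
    {
            __m128i ct = _mm_loadu_si128((const __m128i *)in); /* movdqu (%arg3,%r11,1) */
            __m128i pt = _mm_xor_si128(ct, keystream);         /* pxor \TMP3, \XMMn */
            _mm_storeu_si128((__m128i *)out, pt);              /* plaintext out */
            return ghash_mul(_mm_xor_si128(acc, ct), h);       /* hash the ciphertext */
    }
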
1037 movdqa \TMP2, \TMP3
1038 pslldq $8, \TMP3 # left shift TMP3 2 DWs
1040 pxor \TMP3, \XMM5
1046 movdqa \XMM5, \TMP3
1048 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1050 pslld $30, \TMP3 # packed left shift << 30
1052 pxor \TMP3, \TMP2 # xor the shifted versions
1061 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1062 movdqa \XMM5,\TMP3
1065 psrld $2, \TMP3 # packed right shift >>2
1067 pxor \TMP3,\TMP2 # xor the shifted versions
1077 .macro GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
1145 movdqa \XMMDst, \TMP3
1147 # move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently
1149 pslld $30, \TMP3 # packed left shift << 30
1151 pxor \TMP3, \TMP2 # xor the shifted versions
1161 movdqa \XMMDst, \TMP3
1164 psrld $2, \TMP3 # packed right shift >> 2
1166 pxor \TMP3, \TMP2 # xor the shifted versions
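
GHASH_LAST_4 ends the 4-way parallel phase: as the macro name suggests, the four per-lane accumulators X_1..X_4 are presumably multiplied by the matching hash-key powers and combined into XMMDst, after which lines 1145-1166 apply the same shift-based reduction one last time.

    X = X_1 \cdot H^4 \oplus X_2 \cdot H^3 \oplus X_3 \cdot H^2 \oplus X_4 \cdot H
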