Lines Matching refs:TMP2
163 .macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
165 pshufd $78, \GH, \TMP2
167 pxor \GH, \TMP2 # TMP2 = a1+a0
171 PCLMULQDQ 0x00, \TMP3, \TMP2 # TMP2 = (a0+a1)*(b1+b0)
172 pxor \GH, \TMP2
173 pxor \TMP1, \TMP2 # TMP2 = (a0*b1)+(a1*b0)
174 movdqa \TMP2, \TMP3
176 psrldq $8, \TMP2 # right shift TMP2 2 DWs
178 pxor \TMP2, \TMP1 # TMP1:GH holds the result of GH*HK
182 movdqa \GH, \TMP2
184 movdqa \GH, \TMP4 # copy GH into TMP2,TMP3 and TMP4
187 pslld $31, \TMP2 # packed left shift <<31
190 pxor \TMP3, \TMP2 # xor the shifted versions
191 pxor \TMP4, \TMP2
192 movdqa \TMP2, \TMP5
194 pslldq $12, \TMP2 # left shift TMP2 3 DWs
195 pxor \TMP2, \GH
199 movdqa \GH,\TMP2 # copy GH into TMP2,TMP3 and TMP4
204 psrld $1,\TMP2 # packed right shift >>1
207 pxor \TMP3,\TMP2 # xor the shifted versions
208 pxor \TMP4,\TMP2
209 pxor \TMP5, \TMP2
210 pxor \TMP2, \GH
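
The GHASH_MUL lines above are one Karatsuba carry-less multiply in GF(2^128): TMP1 and GH hold a1*b1 and a0*b0 (computed by PCLMULQDQ lines that do not reference TMP2), while TMP2 carries the (a1+a0)*(b1+b0) middle term, which the two pxor lines turn into a0*b1 + a1*b0 before the halves are recombined with pslldq/psrldq. A minimal C intrinsics sketch of the same split is shown below; clmul_karatsuba() is an illustrative name, not kernel code, and it stops at the unreduced 256-bit product (the reduction is the shift/xor tail of the macro).

#include <emmintrin.h>   /* SSE2: pxor/pshufd/pslldq/psrldq equivalents */
#include <wmmintrin.h>   /* PCLMULQDQ intrinsic (compile with -mpclmul) */

/* Illustrative helper: 128x128-bit carry-less multiply via the same
 * Karatsuba split GHASH_MUL uses; result is the unreduced 256-bit
 * product in *hi:*lo. */
static inline void clmul_karatsuba(__m128i a, __m128i b,
                                   __m128i *hi, __m128i *lo)
{
        __m128i a1b1 = _mm_clmulepi64_si128(a, b, 0x11);           /* a1*b1 */
        __m128i a0b0 = _mm_clmulepi64_si128(a, b, 0x00);           /* a0*b0 */
        __m128i asum = _mm_xor_si128(a, _mm_shuffle_epi32(a, 78)); /* a1+a0 */
        __m128i bsum = _mm_xor_si128(b, _mm_shuffle_epi32(b, 78)); /* b1+b0 */
        __m128i mid  = _mm_clmulepi64_si128(asum, bsum, 0x00);     /* (a1+a0)*(b1+b0) */

        mid = _mm_xor_si128(mid, a1b1);                 /* + a1*b1               */
        mid = _mm_xor_si128(mid, a0b0);                 /* + a0*b0 = a0*b1+a1*b0 */

        *lo = _mm_xor_si128(a0b0, _mm_slli_si128(mid, 8));   /* low 128 bits  */
        *hi = _mm_xor_si128(a1b1, _mm_srli_si128(mid, 8));   /* high 128 bits */
}
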
226 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
266 MOVADQ (%arg1),\TMP2
271 pxor \TMP2, %xmm\index
303 GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
308 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
310 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
312 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
315 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
317 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
320 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
356 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
370 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
383 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
396 MOVADQ (%r10),\TMP2
398 AESENC \TMP2, %xmm\index
405 MOVADQ (%r10), \TMP2
406 AESENCLAST \TMP2, \XMM1
407 AESENCLAST \TMP2, \XMM2
408 AESENCLAST \TMP2, \XMM3
409 AESENCLAST \TMP2, \XMM4
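
In INITIAL_BLOCKS_DEC (and identically in INITIAL_BLOCKS_ENC below) TMP2 is simply the register holding the current round key: MOVADQ loads it from the expanded schedule, pxor applies round key 0, AESENC runs the middle rounds and AESENCLAST the final one. A minimal single-block C sketch of that round structure, assuming an already expanded schedule (aes_encrypt_block, round_keys and nrounds are illustrative names, not the kernel's):

#include <emmintrin.h>   /* SSE2 */
#include <wmmintrin.h>   /* AES-NI intrinsics (compile with -maes) */

/* Sketch only: one block through the AES round sequence the macros use.
 * round_keys[0..nrounds] is an assumed, already-expanded key schedule;
 * nrounds is 10/12/14 for AES-128/192/256. */
static inline __m128i aes_encrypt_block(__m128i block,
                                        const __m128i *round_keys, int nrounds)
{
        block = _mm_xor_si128(block, round_keys[0]);         /* initial whitening (pxor) */
        for (int r = 1; r < nrounds; r++)
                block = _mm_aesenc_si128(block, round_keys[r]);      /* AESENC rounds */
        return _mm_aesenclast_si128(block, round_keys[nrounds]);     /* AESENCLAST    */
}
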
451 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
488 MOVADQ 0(%arg1),\TMP2
493 pxor \TMP2, %xmm\index
524 GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
529 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
531 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
533 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
536 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
538 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
541 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
577 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
591 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
604 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
617 MOVADQ (%r10),\TMP2
619 AESENC \TMP2, %xmm\index
626 MOVADQ (%r10), \TMP2
627 AESENCLAST \TMP2, \XMM1
628 AESENCLAST \TMP2, \XMM2
629 AESENCLAST \TMP2, \XMM3
630 AESENCLAST \TMP2, \XMM4
662 .macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
709 pshufd $78, \XMM6, \TMP2
710 pxor \XMM6, \TMP2
725 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
734 pxor \TMP2, \TMP6
736 pshufd $78, \XMM7, \TMP2
737 pxor \XMM7, \TMP2
755 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
764 pxor \TMP2, \TMP6
770 pshufd $78, \XMM8, \TMP2
771 pxor \XMM8, \TMP2
802 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
822 pxor \TMP6, \TMP2
823 pxor \TMP1, \TMP2
824 pxor \XMM5, \TMP2
825 movdqa \TMP2, \TMP3
827 psrldq $8, \TMP2 # right shift TMP2 2 DWs
829 pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5
833 movdqa \XMM5, \TMP2
836 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
837 pslld $31, \TMP2 # packed left shift << 31
840 pxor \TMP3, \TMP2 # xor the shifted versions
841 pxor \TMP4, \TMP2
842 movdqa \TMP2, \TMP5
844 pslldq $12, \TMP2 # left shift TMP2 3 DWs
845 pxor \TMP2, \XMM5
849 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
852 psrld $1, \TMP2 # packed right shift >>1
855 pxor \TMP3,\TMP2 # xor the shifted versions
856 pxor \TMP4,\TMP2
857 pxor \TMP5, \TMP2
858 pxor \TMP2, \XMM5
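
In GHASH_4_ENCRYPT_4_PARALLEL_ENC (and _DEC below) the repeated pshufd/pxor/PCLMULQDQ groups on XMM6..XMM8 and the pxor \TMP2, \TMP6 lines accumulate four partial products before a single reduction. This works because the GHASH Horner recurrence unrolls into independent multiplies by precomputed powers of the hash key, so the four unreduced products can simply be xor-ed together:

    Y_i = (Y_{i-1} \oplus C_i)\cdot H
    \quad\Longrightarrow\quad
    Y_4 = (Y_0 \oplus C_1)\cdot H^4 \,\oplus\, C_2\cdot H^3 \,\oplus\, C_3\cdot H^2 \,\oplus\, C_4\cdot H

Addition in GF(2^128) is xor, so the per-block Karatsuba partial products can be accumulated (here in TMP6, TMP1 and XMM5) and reduced once at the end of the macro.
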
870 .macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
917 pshufd $78, \XMM6, \TMP2
918 pxor \XMM6, \TMP2
933 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
942 pxor \TMP2, \TMP6
944 pshufd $78, \XMM7, \TMP2
945 pxor \XMM7, \TMP2
963 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
972 pxor \TMP2, \TMP6
978 pshufd $78, \XMM8, \TMP2
979 pxor \XMM8, \TMP2
1010 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1034 pxor \TMP6, \TMP2
1035 pxor \TMP1, \TMP2
1036 pxor \XMM5, \TMP2
1037 movdqa \TMP2, \TMP3
1039 psrldq $8, \TMP2 # right shift TMP2 2 DWs
1041 pxor \TMP2, \TMP1 # accumulate the results in TMP1:XMM5
1045 movdqa \XMM5, \TMP2
1048 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1049 pslld $31, \TMP2 # packed left shift << 31
1052 pxor \TMP3, \TMP2 # xor the shifted versions
1053 pxor \TMP4, \TMP2
1054 movdqa \TMP2, \TMP5
1056 pslldq $12, \TMP2 # left shift TMP2 3 DWs
1057 pxor \TMP2, \XMM5
1061 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1064 psrld $1, \TMP2 # packed right shift >>1
1067 pxor \TMP3,\TMP2 # xor the shifted versions
1068 pxor \TMP4,\TMP2
1069 pxor \TMP5, \TMP2
1070 pxor \TMP2, \XMM5
1077 .macro GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
1083 pshufd $78, \XMM1, \TMP2
1084 pxor \XMM1, \TMP2
1089 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1091 movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1
1096 pshufd $78, \XMM2, \TMP2
1097 pxor \XMM2, \TMP2
1102 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1105 pxor \TMP2, \XMM1
1111 pshufd $78, \XMM3, \TMP2
1112 pxor \XMM3, \TMP2
1117 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1120 pxor \TMP2, \XMM1 # results accumulated in TMP6, XMMDst, XMM1
1124 pshufd $78, \XMM4, \TMP2
1125 pxor \XMM4, \TMP2
1130 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1133 pxor \XMM1, \TMP2
1134 pxor \TMP6, \TMP2
1135 pxor \XMMDst, \TMP2
1137 movdqa \TMP2, \TMP4
1139 psrldq $8, \TMP2 # right shift TMP2 2 DWs
1141 pxor \TMP2, \TMP6
1144 movdqa \XMMDst, \TMP2
1147 # move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently
1148 pslld $31, \TMP2 # packed left shift << 31
1151 pxor \TMP3, \TMP2 # xor the shifted versions
1152 pxor \TMP4, \TMP2
1153 movdqa \TMP2, \TMP7
1155 pslldq $12, \TMP2 # left shift TMP2 3 DWs
1156 pxor \TMP2, \XMMDst
1159 movdqa \XMMDst, \TMP2
1163 psrld $1, \TMP2 # packed right shift >> 1
1166 pxor \TMP3, \TMP2 # xor the shifted versions
1167 pxor \TMP4, \TMP2
1168 pxor \TMP7, \TMP2
1169 pxor \TMP2, \XMMDst
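
The pslld $31 / psrld $1 lines in GHASH_MUL, GHASH_4_ENCRYPT_4_PARALLEL_* and GHASH_LAST_4 (together with the companion shifts on TMP3/TMP4, which do not reference TMP2 and so are not in this listing) implement the two-phase reduction of the 256-bit product modulo the GHASH polynomial, operating on the bit-reflected layout these macros use. As a reference-only sketch of the underlying math in conventional bit order (which is why the shift counts differ from the assembly's), folding the high half back into the low half looks like this; ghash_reduce() is an illustrative name, not kernel code:

#include <stdint.h>

/* Reference-only: reduce a 256-bit carry-less product (x[0] = least
 * significant 64-bit word) modulo g(x) = x^128 + x^7 + x^2 + x + 1.
 * The remainder ends up in x[0], x[1]. */
static void ghash_reduce(uint64_t x[4])
{
        for (int i = 255; i >= 128; i--) {
                if ((x[i / 64] >> (i % 64)) & 1) {
                        int s = i - 128;                    /* fold g(x) << s into x */
                        x[i / 64] ^= 1ULL << (i % 64);      /* clears the x^i term   */
                        x[s / 64] ^= 0x87ULL << (s % 64);   /* x^7+x^2+x+1 = 0x87    */
                        if ((s % 64) > 56)                  /* 0x87 spans a word edge */
                                x[s / 64 + 1] ^= 0x87ULL >> (64 - s % 64);
                }
        }
}
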