Lines Matching refs:TMP4
163 .macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
184 movdqa \GH, \TMP4 # copy GH into TMP2,TMP3 and TMP4
189 pslld $25, \TMP4 # packed left shift <<25
191 pxor \TMP4, \TMP2
199 movdqa \GH,\TMP2 # copy GH into TMP2,TMP3 and TMP4
203 movdqa \GH,\TMP4
206 psrld $7,\TMP4 # packed right shift >>7
208 pxor \TMP4,\TMP2
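
For orientation: the GHASH_MUL hits above are the GF(2^128) multiply used by GCM, computed with PCLMULQDQ plus a shift/XOR reduction; the 25-bit left shifts and 7-bit right shifts on TMP4 listed here belong to the reduction modulo the GCM polynomial x^128 + x^7 + x^2 + x + 1. As a point of reference only (not the kernel's code; the two-word block layout and function name below are illustrative assumptions), the same field multiply can be written bit-at-a-time in C:

    #include <stdint.h>

    /*
     * Bit-at-a-time GF(2^128) multiply per the GHASH definition in
     * NIST SP 800-38D: x[0] holds the most significant 64 bits of the
     * block, bits are numbered MSB-first.  Illustrative reference only.
     */
    static void ghash_mul_ref(uint64_t x[2], const uint64_t y[2])
    {
        uint64_t z[2] = { 0, 0 };
        uint64_t v[2] = { x[0], x[1] };

        for (int i = 0; i < 128; i++) {
            /* if bit i of y is set, Z ^= V */
            if ((y[i / 64] >> (63 - (i % 64))) & 1) {
                z[0] ^= v[0];
                z[1] ^= v[1];
            }
            /* V = V * x, reduced by x^128 + x^7 + x^2 + x + 1 */
            uint64_t carry = v[1] & 1;
            v[1] = (v[1] >> 1) | (v[0] << 63);
            v[0] >>= 1;
            if (carry)
                v[0] ^= 0xE100000000000000ULL;  /* reduction term, reflected */
        }
        x[0] = z[0];
        x[1] = z[1];
    }
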
226 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
303 GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
308 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
310 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
312 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
315 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
317 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
320 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
356 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
370 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
383 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
451 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
524 GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
529 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
531 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
533 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
536 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
538 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
541 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
577 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
591 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
604 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
662 .macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
673 movdqa \XMM5, \TMP4
678 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
731 pxor \TMP1, \TMP4
732 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part
761 pxor \TMP1, \TMP4
762 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part
820 pxor \TMP4, \TMP1
835 movdqa \XMM5, \TMP4
836 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
839 pslld $25, \TMP4 # packed left shift << 25
841 pxor \TMP4, \TMP2
849 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
851 movdqa \XMM5,\TMP4
854 psrld $7, \TMP4 # packed right shift >>7
856 pxor \TMP4,\TMP2
870 .macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
881 movdqa \XMM5, \TMP4
886 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
939 pxor \TMP1, \TMP4
940 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part
969 pxor \TMP1, \TMP4
970 # accumulate the results in TMP4:XMM5, TMP6 holds the middle part
1032 pxor \TMP4, \TMP1
1047 movdqa \XMM5, \TMP4
1048 # move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
1051 pslld $25, \TMP4 # packed left shift << 25
1053 pxor \TMP4, \TMP2
1061 movdqa \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
1063 movdqa \XMM5,\TMP4
1066 psrld $7, \TMP4 # packed right shift >>7
1068 pxor \TMP4,\TMP2
1077 .macro GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
1088 movdqa HashKey_4_k(%rsp), \TMP4
1089 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1101 movdqa HashKey_3_k(%rsp), \TMP4
1102 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1116 movdqa HashKey_2_k(%rsp), \TMP4
1117 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1129 movdqa HashKey_k(%rsp), \TMP4
1130 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1137 movdqa \TMP2, \TMP4
1138 pslldq $8, \TMP4 # left shift TMP4 2 DWs
1140 pxor \TMP4, \XMMDst
1146 movdqa \XMMDst, \TMP4
1147 # move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently
1150 pslld $25, \TMP4 # packed left shift << 25
1152 pxor \TMP4, \TMP2
1162 movdqa \XMMDst, \TMP4
1165 psrld $7, \TMP4 # packed right shift >> 7
1167 pxor \TMP4, \TMP2
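
The GHASH_LAST_4 hits show the Karatsuba shape used throughout this file: each 128x128 carry-less multiply is assembled from three 64x64 PCLMULQDQ products, a1*b1, a0*b0 and (a1+a0)*(b1+b0), with the combined middle term folded across the 128-bit boundary (the "accumulate the results in TMP4:XMM5, TMP6 holds the middle part" comments above). A rough C sketch of that combination, using a bit-at-a-time stand-in for PCLMULQDQ and illustrative names and word order (index 0 = low 64 bits), not the kernel's implementation:

    #include <stdint.h>

    /* 64x64 -> 128-bit carry-less multiply; stand-in for one PCLMULQDQ */
    static void clmul64(uint64_t a, uint64_t b, uint64_t out[2])
    {
        out[0] = out[1] = 0;
        for (int i = 0; i < 64; i++) {
            if ((b >> i) & 1) {
                out[0] ^= a << i;
                if (i)
                    out[1] ^= a >> (64 - i);
            }
        }
    }

    /* 128x128 -> 256-bit carry-less multiply via three 64x64 products */
    static void clmul128_karatsuba(const uint64_t a[2], const uint64_t b[2],
                                   uint64_t out[4])
    {
        uint64_t lo[2], hi[2], mid[2];

        clmul64(a[0], b[0], lo);                 /* a0*b0 */
        clmul64(a[1], b[1], hi);                 /* a1*b1 */
        clmul64(a[0] ^ a[1], b[0] ^ b[1], mid);  /* (a1+a0)*(b1+b0) */

        mid[0] ^= lo[0] ^ hi[0];                 /* middle part */
        mid[1] ^= lo[1] ^ hi[1];

        out[0] = lo[0];
        out[1] = lo[1] ^ mid[0];                 /* fold middle across the boundary */
        out[2] = hi[0] ^ mid[1];
        out[3] = hi[1];
    }

The 256-bit product then still has to be reduced back to 128 bits, which is what the shift-by-25 / shift-by-7 sequences listed above perform.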