1 ########################################################################
2
3 #
4
5 #
6
7
8
9
10
11 #
12
13
14
15
16
17 #
18
19
20
21 #
22 # * Redistributions of source code must retain the above copyright
23
24 #
25 # * Redistributions in binary form must reproduce the above copyright
26
27
28
29 #
30 # * Neither the name of the Intel Corporation nor the names of its
31
32
33 #
34 #
35
36
37
38
39
40
41
42
43
44
45
46 #
47
48
49
50 # /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
51 #
52
53 #include <linux/linkage.h>
54
55 .text
56
57 #define init_crc %edi
58 #define buf %rsi
59 #define len %rdx
60
61 #define FOLD_CONSTS %xmm10
62 #define BSWAP_MASK %xmm11
63
# Fold reg1, reg2 into the next 32 data bytes, storing the result back into
# reg1, reg2.
66 .macro fold_32_bytes offset, reg1, reg2
67 movdqu \offset(buf), %xmm9
68 movdqu \offset+16(buf), %xmm12
69 pshufb BSWAP_MASK, %xmm9
70 pshufb BSWAP_MASK, %xmm12
71 movdqa \reg1, %xmm8
72 movdqa \reg2, %xmm13
73 pclmulqdq $0x00, FOLD_CONSTS, \reg1
74 pclmulqdq $0x11, FOLD_CONSTS, %xmm8
75 pclmulqdq $0x00, FOLD_CONSTS, \reg2
76 pclmulqdq $0x11, FOLD_CONSTS, %xmm13
77 pxor %xmm9 , \reg1
78 xorps %xmm8 , \reg1
79 pxor %xmm12, \reg2
80 xorps %xmm13, \reg2
81 .endm
82
# Fold src_reg into dst_reg.
84 .macro fold_16_bytes src_reg, dst_reg
85 movdqa \src_reg, %xmm8
86 pclmulqdq $0x11, FOLD_CONSTS, \src_reg
87 pclmulqdq $0x00, FOLD_CONSTS, %xmm8
88 pxor %xmm8, \dst_reg
89 xorps \src_reg, \dst_reg
90 .endm
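
# Both macros implement the same fold identity.  A minimal C-style sketch of
# one 16-byte fold, assuming a hypothetical clmul64() helper that performs
# the 64x64 -> 128 bit carryless multiply of PCLMULQDQ:
#
#	u128 fold16(u128 src, u128 dst)		/* sketch, not real code */
#	{
#		/* K_LO = x^n mod G(x), K_HI = x^(n+64) mod G(x); see the
#		 * .Lfold_across_*_consts tables below. */
#		return dst ^ clmul64((u64)(src >> 64), K_HI)
#			   ^ clmul64((u64)src, K_LO);
#	}
#
# The XORs are polynomial additions over GF(2), so the running value stays
# congruent to the message modulo G(x) as the buffer pointer advances.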
91
92 #
93
94 #
95
96 #
97 .align 16
98 ENTRY(crc_t10dif_pcl)
99
100 movdqa .Lbswap_mask(%rip), BSWAP_MASK

# For sizes less than 256 bytes, we can't fold 128 bytes at a time.
103 cmp $256, len
104 jl .Lless_than_256_bytes
105
# Load the first 128 data bytes.  Byte swapping is necessary to make the bit
# order match the polynomial coefficient order.
108 movdqu 16*0(buf), %xmm0
109 movdqu 16*1(buf), %xmm1
110 movdqu 16*2(buf), %xmm2
111 movdqu 16*3(buf), %xmm3
112 movdqu 16*4(buf), %xmm4
113 movdqu 16*5(buf), %xmm5
114 movdqu 16*6(buf), %xmm6
115 movdqu 16*7(buf), %xmm7
116 add $128, buf
117 pshufb BSWAP_MASK, %xmm0
118 pshufb BSWAP_MASK, %xmm1
119 pshufb BSWAP_MASK, %xmm2
120 pshufb BSWAP_MASK, %xmm3
121 pshufb BSWAP_MASK, %xmm4
122 pshufb BSWAP_MASK, %xmm5
123 pshufb BSWAP_MASK, %xmm6
124 pshufb BSWAP_MASK, %xmm7
125
# XOR the first 16 data *bits* with the initial CRC value.
127 pxor %xmm8, %xmm8
128 pinsrw $7, init_crc, %xmm8
129 pxor %xmm8, %xmm0
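# Folding init_crc in this way works because CRC is linear over GF(2):
# XORing it into the highest-order 16 message bits seeds the polynomial
# division without a separate pass over the data.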
130
131 movdqa .Lfold_across_128_bytes_consts(%rip), FOLD_CONSTS
132
# Subtract 128 for the 128 data bytes just consumed.  Subtract another
# 128 to simplify the termination condition of the following loop.
135 sub $256, len
136
# While >= 128 data bytes remain (not counting xmm0-7), fold the 128
# bytes xmm0-7 into them, storing the result back into xmm0-7.
139 .Lfold_128_bytes_loop:
140 fold_32_bytes 0, %xmm0, %xmm1
141 fold_32_bytes 32, %xmm2, %xmm3
142 fold_32_bytes 64, %xmm4, %xmm5
143 fold_32_bytes 96, %xmm6, %xmm7
144 add $128, buf
145 sub $128, len
146 jge .Lfold_128_bytes_loop
147
# Now fold the 112 bytes in xmm0-xmm6 into the 16 bytes in xmm7.

# Fold across 64 bytes.
151 movdqa .Lfold_across_64_bytes_consts(%rip), FOLD_CONSTS
152 fold_16_bytes %xmm0, %xmm4
153 fold_16_bytes %xmm1, %xmm5
154 fold_16_bytes %xmm2, %xmm6
155 fold_16_bytes %xmm3, %xmm7
# Fold across 32 bytes.
157 movdqa .Lfold_across_32_bytes_consts(%rip), FOLD_CONSTS
158 fold_16_bytes %xmm4, %xmm6
159 fold_16_bytes %xmm5, %xmm7
# Fold across 16 bytes.
161 movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS
162 fold_16_bytes %xmm6, %xmm7
163
# Add 128 to get the correct number of data bytes remaining in 0...127
# (not counting xmm7), following the previous extra subtraction by 128.
# Then subtract 16 to simplify the termination condition of the
# following loop.
168 add $128-16, len
169
# While >= 16 data bytes remain (not counting xmm7), fold the 16 bytes
# xmm7 into them, storing the result back into xmm7.
172 jl .Lfold_16_bytes_loop_done
173 .Lfold_16_bytes_loop:
174 movdqa %xmm7, %xmm8
175 pclmulqdq $0x11, FOLD_CONSTS, %xmm7
176 pclmulqdq $0x00, FOLD_CONSTS, %xmm8
177 pxor %xmm8, %xmm7
178 movdqu (buf), %xmm0
179 pshufb BSWAP_MASK, %xmm0
180 pxor %xmm0 , %xmm7
181 add $16, buf
182 sub $16, len
183 jge .Lfold_16_bytes_loop
184
185 .Lfold_16_bytes_loop_done:
# Add 16 to get the correct number of data bytes remaining in 0...15
# (not counting xmm7), following the previous extra subtraction by 16.
188 add $16, len
189 je .Lreduce_final_16_bytes
190
191 .Lhandle_partial_segment:
# Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
# 16 bytes are in xmm7 and the rest are the remaining data in 'buf'.
# To do this without needing a fold constant for each possible 'len',
# redivide the bytes into a first chunk of 'len' bytes and a second
# chunk of 16 bytes, then fold the first chunk into the second.
197
198 movdqa %xmm7, %xmm2
199
# xmm1 = last 16 original data bytes
201 movdqu -16(buf, len), %xmm1
202 pshufb BSWAP_MASK, %xmm1
# xmm2 = high order part of second chunk: xmm7 left-shifted by 'len'
# bytes.
205 lea .Lbyteshift_table+16(%rip), %rax
206 sub len, %rax
207 movdqu (%rax), %xmm0
208 pshufb %xmm0, %xmm2
209
# xmm7 = first chunk: xmm7 right-shifted by '16-len' bytes.
211 pxor .Lmask1(%rip), %xmm0
212 pshufb %xmm0, %xmm7
213
# xmm1 = second chunk: 'len' bytes from xmm1 (low-order bytes), then
# '16-len' bytes from xmm2 (high-order bytes).
216 pblendvb %xmm2, %xmm1 #xmm0 is implicit
217
# Fold the first chunk into the second chunk, storing the result in
# xmm7.
219 movdqa %xmm7, %xmm8
220 pclmulqdq $0x11, FOLD_CONSTS, %xmm7
221 pclmulqdq $0x00, FOLD_CONSTS, %xmm8
222 pxor %xmm8, %xmm7
223 pxor %xmm1, %xmm7
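
# In C terms, the redivision above does roughly the following (a sketch;
# 'tail' stands for the last 'len' data bytes and fold16() for the two
# PCLMULQDQs plus XOR sketched after the macros above):
#
#	first  = xmm7 >> (8 * (16 - len));	/* oldest 'len' bytes */
#	second = (xmm7 << (8 * len)) | tail;	/* remaining 16 bytes */
#	xmm7   = fold16(first, second);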
224
225 .Lreduce_final_16_bytes:
# Reduce the 128-bit value M(x), stored in xmm7, to the final 16-bit CRC.

# Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
229 movdqa .Lfinal_fold_consts(%rip), FOLD_CONSTS
230
# Fold the high 64 bits into the low 64 bits, while also multiplying by
# x^64.  This produces a 128-bit value congruent to x^64 * M(x) and
# whose low 48 bits are 0.
234 movdqa %xmm7, %xmm0
235 pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high bits * x^48 * (x^80 mod G(x))
236 pslldq $8, %xmm0
237 pxor %xmm0, %xmm7 # + low bits * x^64
238
# Fold the high 32 bits into the low 96 bits.  This produces a 96-bit
# value congruent to x^64 * M(x) and whose low 48 bits are 0.
241 movdqa %xmm7, %xmm0
242 pand .Lmask2(%rip), %xmm0 # zero high 32 bits
243 psrldq $12, %xmm7 # extract high 32 bits
244 pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # high 32 bits * x^48 * (x^48 mod G(x))
245 pxor %xmm0, %xmm7 # + low bits
246
# Load G(x) and floor(x^48 / G(x)).
248 movdqa .Lbarrett_reduction_consts(%rip), FOLD_CONSTS
249
# Use Barrett reduction to compute the final CRC value.
251 movdqa %xmm7, %xmm0
252 pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high 32 bits * floor(x^48 / G(x))
253 psrlq $32, %xmm7 # /= x^32
254 pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # *= G(x)
255 psrlq $48, %xmm0
256 pxor %xmm7, %xmm0 # + low 16 nonzero bits
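
# The Barrett step above, in C terms (a rough sketch that elides the
# 64-bit-lane behavior of psrlq; clmul64() as before, with G and MU taken
# from .Lbarrett_reduction_consts, and hi32/lo the high-32-bit part and the
# remaining bits of the value being reduced):
#
#	q   = clmul64(hi32, MU) >> 32;		/* quotient estimate */
#	crc = (lo ^ clmul64(q, G)) & 0xffff;	/* v - q*G(x); XOR = GF(2) sub */
#
# After the subtraction only the 16-bit remainder survives, and that
# remainder is the final CRC.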
# Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0.
258
259 pextrw $0, %xmm0, %eax
260 ret
261
262 .align 16
263 .Lless_than_256_bytes:
# Checksumming a buffer of length 16...255 bytes

# Load the first 16 data bytes.
267 movdqu (buf), %xmm7
268 pshufb BSWAP_MASK, %xmm7
269 add $16, buf
270
# XOR the first 16 data *bits* with the initial CRC value.
272 pxor %xmm0, %xmm0
273 pinsrw $7, init_crc, %xmm0
274 pxor %xmm0, %xmm7
275
276 movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS
277 cmp $16, len
278 je .Lreduce_final_16_bytes # len == 16
279 sub $32, len
280 jge .Lfold_16_bytes_loop # 32 <= len <= 255
281 add $16, len
282 jmp .Lhandle_partial_segment # 17 <= len <= 31
283 ENDPROC(crc_t10dif_pcl)
284
285 .section .rodata, "a", @progbits
286 .align 16
287
# Fold constants precomputed from the polynomial 0x18bb7
# G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
290 .Lfold_across_128_bytes_consts:
291 .quad 0x0000000000006123 # x^(8*128) mod G(x)
292 .quad 0x0000000000002295 # x^(8*128+64) mod G(x)
293 .Lfold_across_64_bytes_consts:
294 .quad 0x0000000000001069 # x^(4*128) mod G(x)
295 .quad 0x000000000000dd31 # x^(4*128+64) mod G(x)
296 .Lfold_across_32_bytes_consts:
297 .quad 0x000000000000857d # x^(2*128) mod G(x)
298 .quad 0x0000000000007acc # x^(2*128+64) mod G(x)
299 .Lfold_across_16_bytes_consts:
300 .quad 0x000000000000a010 # x^(1*128) mod G(x)
301 .quad 0x0000000000001faa # x^(1*128+64) mod G(x)
302 .Lfinal_fold_consts:
303 .quad 0x1368000000000000 # x^48 * (x^48 mod G(x))
304 .quad 0x2d56000000000000 # x^48 * (x^80 mod G(x))
305 .Lbarrett_reduction_consts:
306 .quad 0x0000000000018bb7 # G(x)
307 .quad 0x00000001f65a57f8 # floor(x^48 / G(x))
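
# These constants can be regenerated with plain GF(2) polynomial arithmetic.
# A minimal C sketch (xn_mod_g() is a hypothetical helper, not part of this
# file):
#
#	/* x^n mod G(x), with G(x) = 0x18bb7 (the x^16 bit included) */
#	static u32 xn_mod_g(unsigned int n)
#	{
#		u32 r = 1;			/* the polynomial x^0 */
#		while (n--) {
#			r <<= 1;		/* multiply by x */
#			if (r & 0x10000)
#				r ^= 0x18bb7;	/* reduce modulo G(x) */
#		}
#		return r;
#	}
#
# For example, xn_mod_g(8*128) should yield 0x6123 and xn_mod_g(8*128+64)
# should yield 0x2295, matching .Lfold_across_128_bytes_consts above.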
308
309 .section .rodata.cst16.mask1, "aM", @progbits, 16
310 .align 16
311 .Lmask1:
312 .octa 0x80808080808080808080808080808080
313
314 .section .rodata.cst16.mask2, "aM", @progbits, 16
315 .align 16
316 .Lmask2:
317 .octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF
318
319 .section .rodata.cst16.bswap_mask, "aM", @progbits, 16
320 .align 16
321 .Lbswap_mask:
322 .octa 0x000102030405060708090A0B0C0D0E0F
323
324 .section .rodata.cst32.byteshift_table, "aM", @progbits, 32
325 .align 16
# For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
# len] is the index vector to shift left by 'len' bytes, and is also {0x80, ...,
# 0x80} XOR the index vector to shift right by '16 - len' bytes.
329 .Lbyteshift_table:
330 .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
331 .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
332 .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
333 .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe , 0x0