Lines Matching refs:o1

42 be 1f ! caller asks %o1 & 0x8
43 andcc %o1, 4, %g0 ! nope, check for word remaining
49 andcc %o1, 4, %g0 ! check again for word remaining
51 andcc %o1, 3, %o1 ! check for trailing bytes
56 andcc %o1, 3, %g0 ! check again for trailing bytes
58 addcc %o1, -1, %g0 ! only one byte remains?
60 subcc %o1, 2, %o1 ! only two bytes more?
76 cmp %o1, 6
82 sub %o1, 2, %o1
94 andcc %o1, 0xffffff80, %o3
96 sub %o1, 4, %o1
101 andcc %o1, 0xffffff80, %o3
111 andcc %o1, 0xffffff80, %o3 ! num loop iterations
113 andcc %o1, 0x70, %g1 ! clears carry flag too
122 andcc %o1, 0x70, %g1 ! clears carry flag too
124 andcc %o1, 0xf, %g0 ! anything left at all?
138 andcc %o1, 0xf, %g0 ! anything left at all?
140 andcc %o1, 8, %g0 ! check how much
253 add %o1, 8, %o1
257 EX2(st %g2, [%o1 - 0x08])
260 EX2(st %g3, [%o1 - 0x04])
264 add %o1, 4, %o1
266 EX2(st %g2, [%o1 - 0x04])
278 EX2(sth %o4, [%o1 + 0x00])
280 add %o1, 2, %o1
283 EX2(stb %o5, [%o1 + 0x00])
298 andcc %o3, %o0, %g0 ! Check %o0 only (%o1 has the same last 2 bits)
308 EX2(sth %g4, [%o1 + 0x00])
312 add %o1, 2, %o1
324 EX2(st %g4, [%o1 + 0x00])
327 add %o1, 4, %o1
340 xor %o0, %o1, %o4 ! get changing bits
347 andcc %o1, 4, %g0 ! dest aligned on 4 or 8 byte boundary?
349 5: CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
350 CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
351 CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
352 CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
359 add %o1, 128, %o1 ! advance dest ptr
370 add %o1, %o2, %o1 ! advance dest ptr (carry is clear btw)
371 cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
372 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x58,%g2,%g3,%g4,%g5)
373 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x48,%g2,%g3,%g4,%g5)
374 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x38,%g2,%g3,%g4,%g5)
375 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
376 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
377 CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
385 ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
386 CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
387 CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
388 CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
395 add %o1, 128, %o1 ! advance dest ptr
408 EX2(stb %g5, [%o1])
410 add %o1, 1, %o1
421 EX2(stb %g2, [%o1])
423 EX2(stb %o4, [%o1 + 1])
426 add %o1, 2, %o1
433 EX2(stb %g2, [%o1])
435 EX2(stb %g3, [%o1 + 1])
437 EX2(stb %g2, [%o1 + 2])
439 EX2(stb %o4, [%o1 + 3])
441 add %o1, 4, %o1 ! is worthy). Maybe some day - with the sll/srl
456 EX2(stb %g2, [%o1])
458 EX2(stb %o4, [%o1 + 1])
459 add %o1, 2, %o1
464 EX2(stb %g2, [%o1])
524 add %o1, 16, %o1
529 sub %o1, 0x70, %o1
557 mov %i7, %o1
566 mov %i0, %o1
579 mov %i3, %o1
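
The matches above are the uses of %o1, the destination pointer, in what appears to be the SPARC32 checksum-and-copy routine (the CSUMCOPY_BIGCHUNK / CSUMCOPY_LASTCHUNK macros and cctbl/ccdbl labels match arch/sparc/lib/checksum_32.S): alignment probes (andcc %o1, 4 / andcc %o1, 3), 128-byte unrolled big-chunk passes (andcc %o1, 0xffffff80), a 16-byte last-chunk table, and EX2()-wrapped stores that attach fault handlers for user-space destinations. As orientation only, here is a minimal C sketch of what such a fold-on-carry "checksum while copying" loop computes; it is not the kernel's implementation, every name in it is an illustrative assumption, and unrolling, fault handling, and big-endian odd-byte lane placement are all omitted.

/*
 * A minimal sketch, NOT the kernel's code: the arithmetic a
 * checksum-while-copying routine of this shape performs.  The
 * assembly accumulates a 32-bit ones'-complement sum with
 * addcc/addxcc chains while storing through %o1; every EX2(...)
 * store carries a fault-handler annotation for user-space
 * destinations.  All names below are illustrative assumptions.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Add a value into a running ones'-complement sum with
 * end-around carry (the role of addxcc in the assembly). */
static uint32_t csum_add(uint32_t sum, uint32_t v)
{
    uint64_t t = (uint64_t)sum + v;
    return (uint32_t)t + (uint32_t)(t >> 32); /* fold carry back in */
}

static uint32_t csum_copy_sketch(const unsigned char *src,
                                 unsigned char *dst,
                                 size_t len, uint32_t sum)
{
    uint32_t w;
    uint16_t h;

    /* Word loop: stands in for the unrolled 0x80- and 0x10-byte
     * CSUMCOPY_BIGCHUNK / CSUMCOPY_LASTCHUNK passes. */
    while (len >= 4) {
        memcpy(&w, src, 4);   /* load; memcpy keeps it alignment-safe */
        memcpy(dst, &w, 4);   /* store, like EX2(st %g2, [%o1 - 0x04]) */
        sum = csum_add(sum, w);
        src += 4; dst += 4; len -= 4;
    }
    /* Trailing halfword, then byte: the andcc %o1, 3 tail. */
    if (len >= 2) {
        memcpy(&h, src, 2);
        memcpy(dst, &h, 2);
        sum = csum_add(sum, h);
        src += 2; dst += 2; len -= 2;
    }
    if (len) {
        *dst = *src;
        /* Simplified: big-endian SPARC would shift the odd final
         * byte into the high lane of a halfword before adding. */
        sum = csum_add(sum, *src);
    }
    return sum;
}

The real routine avoids the explicit 64-bit fold by letting addxcc feed each carry straight into the next addition and folding only at the end; the sketch shows the arithmetic being performed, not the unrolling or the exception-table plumbing behind EX2().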