Home
last modified time | relevance | path

Searched refs: t6 (Results 1 – 28 of 28) sorted by relevance

/linux-4.1.27/arch/alpha/lib/
Dstxcpy.S72 negq t8, t6 # e0 : find low bit set
73 and t8, t6, t12 # e1 (stall)
77 and t12, 0x80, t6 # e0 :
78 bne t6, 1f # .. e1 (zdb)
83 subq t12, 1, t6 # .. e1 :
84 zapnot t1, t6, t1 # e0 : clear src bytes >= null
85 or t12, t6, t8 # .. e1 :
141 or t1, t6, t6 # e0 :
142 cmpbge zero, t6, t8 # .. e1 :
143 lda t6, -1 # e0 : for masking just below
[all …]
Dstxncpy.S90 and t12, 0x80, t6 # e0 :
91 bne t6, 1f # .. e1 (zdb)
96 subq t12, 1, t6 # .. e1 :
97 or t12, t6, t8 # e0 :
164 or t0, t6, t6 # e1 : mask original data for zero test
165 cmpbge zero, t6, t8 # e0 :
167 lda t6, -1 # e0 :
170 mskql t6, a1, t6 # e0 : mask out bits already seen
173 or t6, t2, t2 # .. e1 :
242 or t8, t10, t6 # e1 :
[all …]
Dev6-stxcpy.S88 negq t8, t6 # E : find low bit set
89 and t8, t6, t12 # E : (stall)
92 and t12, 0x80, t6 # E : (stall)
93 bne t6, 1f # U : (stall)
98 subq t12, 1, t6 # E :
99 zapnot t1, t6, t1 # U : clear src bytes >= null (stall)
100 or t12, t6, t8 # E : (stall)
164 or t1, t6, t6 # E :
165 cmpbge zero, t6, t8 # E : (stall)
166 lda t6, -1 # E : for masking just below
[all …]
Dev6-stxncpy.S116 and t12, 0x80, t6 # E : (stall)
117 bne t6, 1f # U : (stall)
122 subq t12, 1, t6 # E :
123 or t12, t6, t8 # E : (stall)
204 or t0, t6, t6 # E : mask original data for zero test (stall)
206 cmpbge zero, t6, t8 # E :
208 lda t6, -1 # E :
212 mskql t6, a1, t6 # U : mask out bits already seen
214 or t6, t2, t2 # E : (stall)
287 or t8, t10, t6 # E : (stall)
[all …]
Dstrrchr.S22 mov zero, t6 # .. e1 : t6 is last match aligned addr
45 cmovne t3, v0, t6 # .. e1 : save previous comparisons match
62 cmovne t3, v0, t6 # e0 :
79 addq t6, t0, v0 # .. e1 : add our aligned base ptr to the mix
Dev67-strrchr.S39 mov zero, t6 # E : t6 is last match aligned addr
67 cmovne t3, v0, t6 # E : save previous comparisons match
93 cmovne t3, v0, t6 # E :
104 addq t6, t5, v0 # E : and add to quadword address
/linux-4.1.27/arch/sparc/lib/
Dmemcpy.S17 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
21 ldd [%src + (offset) + 0x18], %t6; \
28 st %t6, [%dst + (offset) + 0x18]; \
31 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
35 ldd [%src + (offset) + 0x18], %t6; \
39 std %t6, [%dst + (offset) + 0x18];
62 #define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
66 ldd [%src - (offset) - 0x08], %t6; \
73 st %t6, [%dst - (offset) - 0x08]; \
76 #define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
[all …]
Dblockops.S26 #define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
30 ldd [src + offset + 0x00], t6; \
34 std t6, [dst + offset + 0x00];
Dcopy_user.S67 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
71 ldd [%src + (offset) + 0x18], %t6; \
78 st %t6, [%dst + (offset) + 0x18]; \
81 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
85 ldd [%src + (offset) + 0x18], %t6; \
89 std %t6, [%dst + (offset) + 0x18];
Dchecksum_32.S191 #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
197 ldd [src + off + 0x18], t6; \
205 std t6, [dst + off + 0x18]; \
206 addxcc t6, sum, sum; \
213 #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \ argument
217 ldd [src + off + 0x18], t6; \
230 st t6, [dst + off + 0x18]; \
231 addxcc t6, sum, sum; \
/linux-4.1.27/scripts/
Dmakelst28 t6=`printf "%lu" $((0x$t4 - 0x$t5))`
31 $3 -r --source --adjust-vma=${t6:-0} $1
/linux-4.1.27/arch/mips/kernel/
Dcps-vec.S313 has_mt t6, 1f
343 bnez t6, 1f
363 lw t6, COREBOOTCFG_VPEMASK(t0)
364 move t8, t6
368 1: andi t0, t6, 1
426 2: srl t6, t6, 1
428 bnez t6, 1b
Dscall32-o32.S77 4: user_lw(t6, 20(t0)) # argument #6 from usp
82 sw t6, 20(sp) # argument #6 to ksp
197 lw t6, 28(sp)
200 sw t6, 24(sp)
Dpm-cps.c83 t0, t1, t2, t3, t4, t5, t6, t7, enumerator
/linux-4.1.27/arch/x86/crypto/
Dcamellia-aesni-avx-asm_64.S49 #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
72 filter_8bit(x0, t0, t1, t7, t6); \
73 filter_8bit(x7, t0, t1, t7, t6); \
74 filter_8bit(x1, t0, t1, t7, t6); \
75 filter_8bit(x4, t0, t1, t7, t6); \
76 filter_8bit(x2, t0, t1, t7, t6); \
77 filter_8bit(x5, t0, t1, t7, t6); \
81 filter_8bit(x3, t2, t3, t7, t6); \
82 filter_8bit(x6, t2, t3, t7, t6); \
99 filter_8bit(x0, t0, t1, t7, t6); \
[all …]
Dcamellia-aesni-avx2-asm_64.S66 #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ argument
74 vbroadcasti128 .Lpre_tf_hi_s1, t6; \
90 filter_8bit(x0, t5, t6, t7, t4); \
91 filter_8bit(x7, t5, t6, t7, t4); \
98 filter_8bit(x2, t5, t6, t7, t4); \
99 filter_8bit(x5, t5, t6, t7, t4); \
100 filter_8bit(x1, t5, t6, t7, t4); \
101 filter_8bit(x4, t5, t6, t7, t4); \
106 vextracti128 $1, x2, t6##_x; \
125 vaesenclast t4##_x, t6##_x, t6##_x; \
[all …]
/linux-4.1.27/arch/ia64/lib/
Dcopy_page_mck.S80 #define t6 t2 // alias! macro
85 #define t12 t6 // alias!
156 (p[D]) ld8 t6 = [src0], 3*8
163 (p[D]) st8 [dst0] = t6, 3*8
Dcopy_page.S43 t5[PIPE_DEPTH], t6[PIPE_DEPTH], t7[PIPE_DEPTH], t8[PIPE_DEPTH]
82 (p[0]) ld8 t6[0]=[src2],16
83 (EPI) st8 [tgt2]=t6[PIPE_DEPTH-1],16
Dmemcpy_mck.S48 #define t6 t2 // alias! macro
54 #define t12 t6 // alias!
235 EX(.ex_handler, (p[D]) ld8 t6 = [src0], 3*8)
242 EX(.ex_handler, (p[D]) st8 [dst0] = t6, 3*8)
437 EK(.ex_handler_short, (p8) ld1 t6=[src1],2)
442 EK(.ex_handler_short, (p8) st1 [dst1]=t6,2)
484 EX(.ex_handler_short, (p11) ld1 t6=[src1],2)
491 EX(.ex_handler_short, (p11) st1 [dst1] = t6,2)
/linux-4.1.27/net/bluetooth/
Decc.c660 u64 t6[NUM_ECC_DIGITS]; in xycz_add_c() local
670 vli_mod_sub(t6, x2, x1, curve_p); /* t6 = C - B */ in xycz_add_c()
671 vli_mod_mult_fast(y1, y1, t6); /* t2 = y1 * (C - B) */ in xycz_add_c()
672 vli_mod_add(t6, x1, x2, curve_p); /* t6 = B + C */ in xycz_add_c()
674 vli_mod_sub(x2, x2, t6, curve_p); /* t3 = x3 */ in xycz_add_c()
681 vli_mod_sub(t7, t7, t6, curve_p); /* t7 = x3' */ in xycz_add_c()
682 vli_mod_sub(t6, t7, x1, curve_p); /* t6 = x3' - B */ in xycz_add_c()
683 vli_mod_mult_fast(t6, t6, t5); /* t6 = (y2 + y1)*(x3' - B) */ in xycz_add_c()
684 vli_mod_sub(y1, t6, y1, curve_p); /* t2 = y3' */ in xycz_add_c()
/linux-4.1.27/drivers/media/pci/cx88/
Dcx88-dsp.c79 u32 t2, t4, t6, t8; in int_cos() local
91 t6 = t4*x/32768*x/32768/5/6; in int_cos()
92 t8 = t6*x/32768*x/32768/7/8; in int_cos()
93 ret = 32768-t2+t4-t6+t8; in int_cos()
/linux-4.1.27/arch/alpha/include/uapi/asm/
Dregdef.h12 #define t6 $7 macro
/linux-4.1.27/arch/mips/lib/
Dmemcpy.S176 #define t6 $14 macro
564 bnez t6, .Ldone\@ /* Skip the zeroing part if inatomic */
678 li t6, 1
692 li t6, 0 /* not inatomic */
708 li t6, 1
716 li t6, 0 /* not inatomic */
Dcsum_partial.S34 #define t6 $14 macro
502 LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
518 STORE(t6, UNIT(6)(dst), .Ls_exc\@)
519 ADDC(t6, t7)
521 ADDC(sum, t6)
/linux-4.1.27/arch/mips/include/asm/
Dregdef.h38 #define t6 $14 macro
/linux-4.1.27/arch/tile/kernel/
Dhvglue_trace.c157 #define __HV_DECL6(t6, a6, ...) t6 a6, __HV_DECL5(__VA_ARGS__) argument
166 #define __HV_PASS6(t6, a6, ...) a6, __HV_PASS5(__VA_ARGS__) argument
/linux-4.1.27/lib/
Dcrc32.c63 # define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \
70 t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255])
79 const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7]; local
/linux-4.1.27/arch/mips/cavium-octeon/
Docteon-memcpy.S114 #define t6 $14 macro