Searched refs:zSig0 (Results 1 - 2 of 2) sorted by relevance

/linux-4.1.27/arch/arm/nwfpe/
softfloat.c
542 and extended significand formed by the concatenation of `zSig0' and `zSig1',
566 struct roundingData *roundData, flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1 roundAndPackFloatx80()
588 zSig0 |= ( zSig1 != 0 ); roundAndPackFloatx80()
603 roundBits = zSig0 & roundMask; roundAndPackFloatx80()
606 || ( ( zExp == 0x7FFE ) && ( zSig0 + roundIncrement < zSig0 ) ) roundAndPackFloatx80()
614 || ( zSig0 <= zSig0 + roundIncrement ); roundAndPackFloatx80()
615 shift64RightJamming( zSig0, 1 - zExp, &zSig0 ); roundAndPackFloatx80()
617 roundBits = zSig0 & roundMask; roundAndPackFloatx80()
620 zSig0 += roundIncrement; roundAndPackFloatx80()
621 if ( (sbits64) zSig0 < 0 ) zExp = 1; roundAndPackFloatx80()
626 zSig0 &= ~ roundMask; roundAndPackFloatx80()
627 return packFloatx80( zSign, zExp, zSig0 ); roundAndPackFloatx80()
631 zSig0 += roundIncrement; roundAndPackFloatx80()
632 if ( zSig0 < roundIncrement ) { roundAndPackFloatx80()
634 zSig0 = LIT64( 0x8000000000000000 ); roundAndPackFloatx80()
640 zSig0 &= ~ roundMask; roundAndPackFloatx80()
641 if ( zSig0 == 0 ) zExp = 0; roundAndPackFloatx80()
642 return packFloatx80( zSign, zExp, zSig0 ); roundAndPackFloatx80()
661 && ( zSig0 == LIT64( 0xFFFFFFFFFFFFFFFF ) ) roundAndPackFloatx80()
681 || ( zSig0 < LIT64( 0xFFFFFFFFFFFFFFFF ) ); roundAndPackFloatx80()
682 shift64ExtraRightJamming( zSig0, zSig1, 1 - zExp, &zSig0, &zSig1 ); roundAndPackFloatx80()
698 ++zSig0; roundAndPackFloatx80()
699 zSig0 &= ~ ( ( zSig1 + zSig1 == 0 ) & roundNearestEven ); roundAndPackFloatx80()
700 if ( (sbits64) zSig0 < 0 ) zExp = 1; roundAndPackFloatx80()
702 return packFloatx80( zSign, zExp, zSig0 ); roundAndPackFloatx80()
707 ++zSig0; roundAndPackFloatx80()
708 if ( zSig0 == 0 ) { roundAndPackFloatx80()
710 zSig0 = LIT64( 0x8000000000000000 ); roundAndPackFloatx80()
713 zSig0 &= ~ ( ( zSig1 + zSig1 == 0 ) & roundNearestEven ); roundAndPackFloatx80()
717 if ( zSig0 == 0 ) zExp = 0; roundAndPackFloatx80()
720 return packFloatx80( zSign, zExp, zSig0 ); roundAndPackFloatx80()
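
The roundAndPackFloatx80() hits above show the reduced-precision path: the bits below the target precision are collected in roundBits, half an LSB is added, and a tie is forced to an even result. Below is a minimal standalone sketch of that pattern, assuming plain C types rather than the kernel's bits64/roundingData, and ignoring the exponent overflow/underflow handling the real function performs.

    #include <stdint.h>

    #define ROUND_BITS 11   /* e.g. keep the top 53 of the 64 significand bits */

    /* Round sig to (64 - ROUND_BITS) bits, nearest-even, following the pattern
     * in the hits above: roundBits = zSig0 & roundMask; zSig0 += roundIncrement;
     * zSig0 &= ~roundMask. */
    static uint64_t round_sig_nearest_even(uint64_t sig, int *inexact)
    {
        const uint64_t roundMask      = ((uint64_t)1 << ROUND_BITS) - 1;
        const uint64_t roundIncrement = (uint64_t)1 << (ROUND_BITS - 1);
        uint64_t roundBits = sig & roundMask;

        *inexact = (roundBits != 0);
        sig += roundIncrement;                    /* the real code also catches a
                                                     carry out of bit 63 here */
        if (roundBits == roundIncrement)          /* exactly halfway: */
            sig &= ~((uint64_t)1 << ROUND_BITS);  /* clear result LSB (ties to even) */
        return sig & ~roundMask;                  /* drop the rounded-off bits */
    }

In the full 80-bit-precision path the same tie handling appears as the `zSig0 &= ~((zSig1 + zSig1 == 0) & roundNearestEven)` hits: when zSig1 was exactly one half, the freshly incremented zSig0 has its low bit cleared to make the result even.
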
726 `zExp', and significand formed by the concatenation of `zSig0' and `zSig1',
735 struct roundingData *roundData, flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1 normalizeRoundAndPackFloatx80()
740 if ( zSig0 == 0 ) { normalizeRoundAndPackFloatx80()
741 zSig0 = zSig1; normalizeRoundAndPackFloatx80()
745 shiftCount = countLeadingZeros64( zSig0 ); normalizeRoundAndPackFloatx80()
746 shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 ); normalizeRoundAndPackFloatx80()
749 roundAndPackFloatx80( roundData, zSign, zExp, zSig0, zSig1 ); normalizeRoundAndPackFloatx80()
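
normalizeRoundAndPackFloatx80() only has to left-justify the 128-bit significand before handing it to roundAndPackFloatx80(). A rough equivalent of lines 740-749 with standard C types, replacing countLeadingZeros64()/shortShift128Left() with a simple shift loop:

    #include <stdint.h>

    /* Shift zSig0:zSig1 left until bit 63 of zSig0 is set, adjusting zExp to match. */
    static void normalize128(uint64_t *zSig0, uint64_t *zSig1, int32_t *zExp)
    {
        if (*zSig0 == 0) {            /* all significant bits are in the low word */
            *zSig0 = *zSig1;
            *zSig1 = 0;
            *zExp -= 64;
            if (*zSig0 == 0)
                return;               /* zero significand: nothing to normalize */
        }
        while (!(*zSig0 & UINT64_C(0x8000000000000000))) {
            *zSig0 = (*zSig0 << 1) | (*zSig1 >> 63);   /* shortShift128Left by 1 */
            *zSig1 <<= 1;
            --*zExp;                  /* zExp -= shiftCount in the original */
        }
    }
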
2119 bits64 aSig, bSig, zSig0, zSig1; float64_mul() local
2157 mul64To128( aSig, bSig, &zSig0, &zSig1 ); float64_mul()
2158 zSig0 |= ( zSig1 != 0 ); float64_mul()
2159 if ( 0 <= (sbits64) ( zSig0<<1 ) ) { float64_mul()
2160 zSig0 <<= 1; float64_mul()
2163 return roundAndPackFloat64( roundData, zSign, zExp, zSig0 ); float64_mul()
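
In the float64_mul() hits, the two significands have already been given their implicit leading 1 and shifted up, so the leading bit of the 128-bit product's high word lands at position 61 or 62, and at most one renormalizing shift is needed. A sketch of lines 2157-2163, using the GCC/Clang unsigned __int128 extension in place of mul64To128():

    #include <stdint.h>

    /* Significand step of a double multiply, mirroring the hits above:
     * 64x64 -> 128 product, low half folded into a sticky bit, then one
     * renormalizing shift if the leading bit ended up one place low. */
    static uint64_t mul_sig64(uint64_t aSig, uint64_t bSig, int32_t *zExp)
    {
        unsigned __int128 prod = (unsigned __int128)aSig * bSig;  /* mul64To128() */
        uint64_t zSig0 = (uint64_t)(prod >> 64);
        uint64_t zSig1 = (uint64_t)prod;

        zSig0 |= (zSig1 != 0);              /* keep the low half as a sticky bit */
        if ((int64_t)(zSig0 << 1) >= 0) {   /* bit 62 clear: leading bit is at 61 */
            zSig0 <<= 1;
            --*zExp;
        }
        return zSig0;                       /* goes on to roundAndPackFloat64() */
    }

The sh4 copy of float64_mul() at the bottom of these results is the same sequence, with unsigned long long in place of bits64.
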
2748 bits64 aSig, bSig, zSig0, zSig1; addFloatx80Sigs() local
2782 zSig0 = aSig + bSig; addFloatx80Sigs()
2784 normalizeFloatx80Subnormal( zSig0, &zExp, &zSig0 ); addFloatx80Sigs()
2791 zSig0 = aSig + bSig; addFloatx80Sigs()
2793 if ( (sbits64) zSig0 < 0 ) goto roundAndPack; addFloatx80Sigs()
2795 shift64ExtraRightJamming( zSig0, zSig1, 1, &zSig0, &zSig1 ); addFloatx80Sigs()
2796 zSig0 |= LIT64( 0x8000000000000000 ); addFloatx80Sigs()
2801 roundData, zSign, zExp, zSig0, zSig1 ); addFloatx80Sigs()
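
addFloatx80Sigs() adds two 64-bit significands, so the only question is whether the sum carried out of bit 63; if it did, the sum is shifted right one place with the lost bits kept sticky and the exponent bumped. A compact sketch of the unequal-exponent path (lines 2791-2796), with shift64ExtraRightJamming() written out for a shift count of 1; the types are standard C and the helper name is hypothetical:

    #include <stdint.h>

    /* aSig has bit 63 set; bSig is the other significand already shifted right
     * by the exponent difference (at least one place, so bSig < 2^63), with the
     * bits shifted out of it summarized in 'extra'. */
    static void add_sigs_x80(uint64_t aSig, uint64_t bSig, uint64_t extra,
                             uint64_t *zSig0, uint64_t *zSig1, int32_t *zExp)
    {
        uint64_t sum = aSig + bSig;
        if ((int64_t)sum < 0) {          /* bit 63 still set: no carry out */
            *zSig0 = sum;
            *zSig1 = extra;
            return;
        }
        /* Carry out of bit 63: shift right one place, keeping the lost bits sticky. */
        *zSig1 = (sum << 63) | (extra != 0);                 /* shift64ExtraRightJamming(.., 1) */
        *zSig0 = (sum >> 1) | UINT64_C(0x8000000000000000);  /* put back the carried-out 1 */
        ++*zExp;
    }
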
2817 bits64 aSig, bSig, zSig0, zSig1; subFloatx80Sigs() local
2854 sub128( bSig, 0, aSig, zSig1, &zSig0, &zSig1 ); subFloatx80Sigs()
2866 sub128( aSig, 0, bSig, zSig1, &zSig0, &zSig1 ); subFloatx80Sigs()
2871 roundData, zSign, zExp, zSig0, zSig1 ); subFloatx80Sigs()
2930 bits64 aSig, bSig, zSig0, zSig1; floatx80_mul() local
2969 mul64To128( aSig, bSig, &zSig0, &zSig1 ); floatx80_mul()
2970 if ( 0 < (sbits64) zSig0 ) { floatx80_mul()
2971 shortShift128Left( zSig0, zSig1, 1, &zSig0, &zSig1 ); floatx80_mul()
2976 roundData, zSign, zExp, zSig0, zSig1 ); floatx80_mul()
2991 bits64 aSig, bSig, zSig0, zSig1; floatx80_div() local
3039 zSig0 = estimateDiv128To64( aSig, rem1, bSig ); floatx80_div()
3040 mul64To128( bSig, zSig0, &term0, &term1 ); floatx80_div()
3043 --zSig0; floatx80_div()
3058 roundData, zSign, zExp, zSig0, zSig1 ); floatx80_div()
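
The floatx80_div() hits show the classic estimate-and-correct division step: estimateDiv128To64() may return a quotient digit that is slightly too large, so it is multiplied back by the divisor and decremented until the partial remainder is non-negative. A minimal sketch of just that correction (lines 3039-3043), with unsigned __int128 standing in for the kernel's mul64To128()/sub128()/add128() helpers and the estimate taken as an input:

    #include <stdint.h>

    /* num  : 128-bit partial dividend (aSig:rem1 in the hits above)
     * bSig : 64-bit divisor significand
     * q    : quotient-digit estimate, possibly a little too high */
    static uint64_t correct_quotient_digit(unsigned __int128 num, uint64_t bSig,
                                           uint64_t q)
    {
        unsigned __int128 term = (unsigned __int128)q * bSig;   /* mul64To128() */
        while (term > num) {     /* sub128() would leave a negative remainder */
            --q;                 /* --zSig0 */
            term -= bSig;        /* same effect as add128(rem, 0, bSig) */
        }
        return q;
    }
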
3173 bits64 aSig0, aSig1, zSig0, zSig1; floatx80_sqrt() local
3200 zSig0 = estimateSqrt32( aExp, aSig0>>32 ); floatx80_sqrt()
3201 zSig0 <<= 31; floatx80_sqrt()
3204 zSig0 = estimateDiv128To64( aSig0, aSig1, zSig0 ) + zSig0 + 4; floatx80_sqrt()
3205 if ( 0 <= (sbits64) zSig0 ) zSig0 = LIT64( 0xFFFFFFFFFFFFFFFF ); floatx80_sqrt()
3207 mul64To128( zSig0, zSig0, &term0, &term1 ); floatx80_sqrt()
3210 --zSig0; floatx80_sqrt()
3211 shortShift128Left( 0, zSig0, 1, &term0, &term1 ); floatx80_sqrt()
3216 zSig1 = estimateDiv128To64( shiftedRem0, shiftedRem1, zSig0 ); floatx80_sqrt()
3219 mul64To128( zSig0, zSig1, &term1, &term2 ); floatx80_sqrt()
3226 shortShift192Left( 0, zSig0, zSig1, 1, &term1, &term2, &term3 ); floatx80_sqrt()
3235 roundData, 0, zExp, zSig0, zSig1 ); floatx80_sqrt()
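
floatx80_sqrt() follows the same estimate-and-correct shape: estimateSqrt32() seeds a root that may be slightly high, the code squares it back with mul64To128(), subtracts, and decrements while the remainder is negative, then refines a second 64-bit word zSig1 the same way. A stripped-down sketch of the correction for the top word only, recomputing the square instead of carrying the running remainder the kernel code keeps:

    #include <stdint.h>

    /* aSig  : the fixed-point input significand, widened to 128 bits
     * zSig0 : an initial root estimate that may exceed the true root by a few ulps */
    static uint64_t correct_sqrt_digit(unsigned __int128 aSig, uint64_t zSig0)
    {
        while ((unsigned __int128)zSig0 * zSig0 > aSig)   /* mul64To128 + sub128 < 0 */
            --zSig0;                                      /* --zSig0 in the hits above */
        return zSig0;
    }
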
565 roundAndPackFloatx80( struct roundingData *roundData, flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1 ) roundAndPackFloatx80() argument
734 normalizeRoundAndPackFloatx80( struct roundingData *roundData, flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1 ) normalizeRoundAndPackFloatx80() argument
/linux-4.1.27/arch/sh/kernel/cpu/sh4/
softfloat.c
869 unsigned long long int aSig, bSig, zSig0, zSig1; float64_mul() local
895 mul64To128(aSig, bSig, &zSig0, &zSig1); float64_mul()
896 zSig0 |= (zSig1 != 0); float64_mul()
897 if (0 <= (signed long long int)(zSig0 << 1)) { float64_mul()
898 zSig0 <<= 1; float64_mul()
901 return roundAndPackFloat64(zSign, zExp, zSig0); float64_mul()

Completed in 145 milliseconds