bits64 70 arch/arm/nwfpe/softfloat.c static int32 roundAndPackInt32( struct roundingData *roundData, flag zSign, bits64 absZ )
bits64 294 arch/arm/nwfpe/softfloat.c INLINE bits64 extractFloat64Frac( float64 a )
bits64 336 arch/arm/nwfpe/softfloat.c normalizeFloat64Subnormal( bits64 aSig, int16 *zExpPtr, bits64 *zSigPtr )
bits64 358 arch/arm/nwfpe/softfloat.c INLINE float64 packFloat64( flag zSign, int16 zExp, bits64 zSig )
bits64 361 arch/arm/nwfpe/softfloat.c return ( ( (bits64) zSign )<<63 ) + ( ( (bits64) zExp )<<52 ) + zSig;
bits64 388 arch/arm/nwfpe/softfloat.c static float64 roundAndPackFloat64( struct roundingData *roundData, flag zSign, int16 zExp, bits64 zSig )
bits64 453 arch/arm/nwfpe/softfloat.c normalizeRoundAndPackFloat64( struct roundingData *roundData, flag zSign, int16 zExp, bits64 zSig )
bits64 470 arch/arm/nwfpe/softfloat.c INLINE bits64 extractFloatx80Frac( floatx80 a )
bits64 512 arch/arm/nwfpe/softfloat.c normalizeFloatx80Subnormal( bits64 aSig, int32 *zExpPtr, bits64 *zSigPtr )
bits64 528 arch/arm/nwfpe/softfloat.c INLINE floatx80 packFloatx80( flag zSign, int32 zExp, bits64 zSig )
bits64 566 arch/arm/nwfpe/softfloat.c struct roundingData *roundData, flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1
bits64 735 arch/arm/nwfpe/softfloat.c struct roundingData *roundData, flag zSign, int32 zExp, bits64 zSig0, bits64 zSig1
bits64 785 arch/arm/nwfpe/softfloat.c bits64 zSig;
bits64 811 arch/arm/nwfpe/softfloat.c bits64 zSig;
bits64 840 arch/arm/nwfpe/softfloat.c bits64 zSig;
bits64 922 arch/arm/nwfpe/softfloat.c return packFloat64( aSign, aExp + 0x380, ( (bits64) aSig )<<29 );
bits64 954 arch/arm/nwfpe/softfloat.c return packFloatx80( aSign, aExp + 0x3F80, ( (bits64) aSig )<<40 );
bits64 1224 arch/arm/nwfpe/softfloat.c bits64 zSig64;
bits64 1263 arch/arm/nwfpe/softfloat.c shift64RightJamming( ( (bits64) aSig ) * bSig, 32, &zSig64 );
bits64 1329 arch/arm/nwfpe/softfloat.c bits64 tmp = ( (bits64) aSig )<<32;
bits64 1334 arch/arm/nwfpe/softfloat.c zSig |= ( ( (bits64) bSig ) * zSig != ( (bits64) aSig )<<32 );
bits64 1353 arch/arm/nwfpe/softfloat.c bits64 aSig64, bSig64, q64;
bits64 1398 arch/arm/nwfpe/softfloat.c bits64 tmp = ( (bits64) aSig )<<32;
bits64 1412 arch/arm/nwfpe/softfloat.c aSig64 = ( (bits64) aSig )<<40;
bits64 1413 arch/arm/nwfpe/softfloat.c bSig64 = ( (bits64) bSig )<<40;
bits64 1455 arch/arm/nwfpe/softfloat.c bits64 rem, term;
bits64 1484 arch/arm/nwfpe/softfloat.c term = ( (bits64) zSig ) * zSig;
bits64 1485 arch/arm/nwfpe/softfloat.c rem = ( ( (bits64) aSig )<<32 ) - term;
bits64 1488 arch/arm/nwfpe/softfloat.c rem += ( ( (bits64) zSig )<<1 ) | 1;
bits64 1656 arch/arm/nwfpe/softfloat.c bits64 aSig;
bits64 1684 arch/arm/nwfpe/softfloat.c bits64 aSig, savedASig;
bits64 1731 arch/arm/nwfpe/softfloat.c bits64 aSig;
bits64 1757 arch/arm/nwfpe/softfloat.c bits64 aSig, savedASig;
bits64 1800 arch/arm/nwfpe/softfloat.c bits64 aSig;
bits64 1834 arch/arm/nwfpe/softfloat.c bits64 aSig;
bits64 1867 arch/arm/nwfpe/softfloat.c bits64 lastBitMask, roundBitsMask;
bits64 1879 arch/arm/nwfpe/softfloat.c if ( (bits64) ( a<<1 ) == 0 ) return a;
bits64 1928 arch/arm/nwfpe/softfloat.c bits64 aSig, bSig, zSig;
bits64 2000 arch/arm/nwfpe/softfloat.c bits64 aSig, bSig, zSig;
bits64 2119 arch/arm/nwfpe/softfloat.c bits64 aSig, bSig, zSig0, zSig1;
bits64 2178 arch/arm/nwfpe/softfloat.c bits64 aSig, bSig, zSig;
bits64 2179 arch/arm/nwfpe/softfloat.c bits64 rem0, rem1;
bits64 2180 arch/arm/nwfpe/softfloat.c bits64 term0, term1;
bits64 2249 arch/arm/nwfpe/softfloat.c bits64 aSig, bSig;
bits64 2250 arch/arm/nwfpe/softfloat.c bits64 q, alternateASig;
bits64 2335 arch/arm/nwfpe/softfloat.c bits64 aSig, zSig;
bits64 2336 arch/arm/nwfpe/softfloat.c bits64 rem0, rem1, term0, term1; //, shiftedRem;
bits64 2403 arch/arm/nwfpe/softfloat.c return ( a == b ) || ( (bits64) ( ( a | b )<<1 ) == 0 );
bits64 2427 arch/arm/nwfpe/softfloat.c if ( aSign != bSign ) return aSign || ( (bits64) ( ( a | b )<<1 ) == 0 );
bits64 2451 arch/arm/nwfpe/softfloat.c if ( aSign != bSign ) return aSign && ( (bits64) ( ( a | b )<<1 ) != 0 );
bits64 2473 arch/arm/nwfpe/softfloat.c return ( a == b ) || ( (bits64) ( ( a | b )<<1 ) == 0 );
bits64 2498 arch/arm/nwfpe/softfloat.c if ( aSign != bSign ) return aSign || ( (bits64) ( ( a | b )<<1 ) == 0 );
bits64 2523 arch/arm/nwfpe/softfloat.c if ( aSign != bSign ) return aSign && ( (bits64) ( ( a | b )<<1 ) != 0 );
bits64 2545 arch/arm/nwfpe/softfloat.c bits64 aSig;
bits64 2550 arch/arm/nwfpe/softfloat.c if ( ( aExp == 0x7FFF ) && (bits64) ( aSig<<1 ) ) aSign = 0;
bits64 2573 arch/arm/nwfpe/softfloat.c bits64 aSig, savedASig;
bits64 2581 arch/arm/nwfpe/softfloat.c if ( ( aExp == 0x7FFF ) && (bits64) ( aSig<<1 ) ) aSign = 0;
bits64 2616 arch/arm/nwfpe/softfloat.c bits64 aSig;
bits64 2622 arch/arm/nwfpe/softfloat.c if ( (bits64) ( aSig<<1 ) ) {
bits64 2645 arch/arm/nwfpe/softfloat.c bits64 aSig, zSig;
bits64 2651 arch/arm/nwfpe/softfloat.c if ( (bits64) ( aSig<<1 ) ) {
bits64 2674 arch/arm/nwfpe/softfloat.c bits64 lastBitMask, roundBitsMask;
bits64 2680 arch/arm/nwfpe/softfloat.c if ( ( aExp == 0x7FFF ) && (bits64) ( extractFloatx80Frac( a )<<1 ) ) {
bits64 2687 arch/arm/nwfpe/softfloat.c && ( (bits64) ( extractFloatx80Frac( a )<<1 ) == 0 ) ) {
bits64 2694 arch/arm/nwfpe/softfloat.c if ( ( aExp == 0x3FFE ) && (bits64) ( extractFloatx80Frac( a )<<1 )
bits64 2748 arch/arm/nwfpe/softfloat.c bits64 aSig, bSig, zSig0, zSig1;
bits64 2758 arch/arm/nwfpe/softfloat.c if ( (bits64) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b );
bits64 2767 arch/arm/nwfpe/softfloat.c if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
bits64 2776 arch/arm/nwfpe/softfloat.c if ( (bits64) ( ( aSig | bSig )<<1 ) ) {
bits64 2817 arch/arm/nwfpe/softfloat.c bits64 aSig, bSig, zSig0, zSig1;
bits64 2829 arch/arm/nwfpe/softfloat.c if ( (bits64) ( ( aSig | bSig )<<1 ) ) {
bits64 2848 arch/arm/nwfpe/softfloat.c if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
bits64 2860 arch/arm/nwfpe/softfloat.c if ( (bits64) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b );
bits64 2930 arch/arm/nwfpe/softfloat.c bits64 aSig, bSig, zSig0, zSig1;
bits64 2941 arch/arm/nwfpe/softfloat.c if ( (bits64) ( aSig<<1 )
bits64 2942 arch/arm/nwfpe/softfloat.c || ( ( bExp == 0x7FFF ) && (bits64) ( bSig<<1 ) ) ) {
bits64 2949 arch/arm/nwfpe/softfloat.c if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
bits64 2991 arch/arm/nwfpe/softfloat.c bits64 aSig, bSig, zSig0, zSig1;
bits64 2992 arch/arm/nwfpe/softfloat.c bits64 rem0, rem1, rem2, term0, term1, term2;
bits64 3003 arch/arm/nwfpe/softfloat.c if ( (bits64) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b );
bits64 3005 arch/arm/nwfpe/softfloat.c if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
bits64 3011 arch/arm/nwfpe/softfloat.c if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
bits64 3047 arch/arm/nwfpe/softfloat.c if ( (bits64) ( zSig1<<1 ) <= 8 ) {
bits64 3073 arch/arm/nwfpe/softfloat.c bits64 aSig0, aSig1, bSig;
bits64 3074 arch/arm/nwfpe/softfloat.c bits64 q, term0, term1, alternateASig0, alternateASig1;
bits64 3084 arch/arm/nwfpe/softfloat.c if ( (bits64) ( aSig0<<1 )
bits64 3085 arch/arm/nwfpe/softfloat.c || ( ( bExp == 0x7FFF ) && (bits64) ( bSig<<1 ) ) ) {
bits64 3091 arch/arm/nwfpe/softfloat.c if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
bits64 3106 arch/arm/nwfpe/softfloat.c if ( (bits64) ( aSig0<<1 ) == 0 ) return a;
bits64 3173 arch/arm/nwfpe/softfloat.c bits64 aSig0, aSig1, zSig0, zSig1;
bits64 3174 arch/arm/nwfpe/softfloat.c bits64 rem0, rem1, rem2, rem3, term0, term1, term2, term3;
bits64 3175 arch/arm/nwfpe/softfloat.c bits64 shiftedRem0, shiftedRem1;
bits64 3182 arch/arm/nwfpe/softfloat.c if ( (bits64) ( aSig0<<1 ) ) return propagateFloatx80NaN( a, a );
bits64 3217 arch/arm/nwfpe/softfloat.c if ( (bits64) ( zSig1<<1 ) <= 10 ) {
bits64 3251 arch/arm/nwfpe/softfloat.c && (bits64) ( extractFloatx80Frac( a )<<1 ) )
bits64 3253 arch/arm/nwfpe/softfloat.c && (bits64) ( extractFloatx80Frac( b )<<1 ) )
bits64 3283 arch/arm/nwfpe/softfloat.c && (bits64) ( extractFloatx80Frac( a )<<1 ) )
bits64 3285 arch/arm/nwfpe/softfloat.c && (bits64) ( extractFloatx80Frac( b )<<1 ) )
bits64 3317 arch/arm/nwfpe/softfloat.c && (bits64) ( extractFloatx80Frac( a )<<1 ) )
bits64 3319 arch/arm/nwfpe/softfloat.c && (bits64) ( extractFloatx80Frac( b )<<1 ) )
bits64 3350 arch/arm/nwfpe/softfloat.c && (bits64) ( extractFloatx80Frac( a )<<1 ) )
bits64 3352 arch/arm/nwfpe/softfloat.c && (bits64) ( extractFloatx80Frac( b )<<1 ) )
bits64 3379 arch/arm/nwfpe/softfloat.c && (bits64) ( extractFloatx80Frac( a )<<1 ) )
bits64 3381 arch/arm/nwfpe/softfloat.c && (bits64) ( extractFloatx80Frac( b )<<1 ) )
bits64 3413 arch/arm/nwfpe/softfloat.c && (bits64) ( extractFloatx80Frac( a )<<1 ) )
bits64 3415 arch/arm/nwfpe/softfloat.c && (bits64) ( extractFloatx80Frac( b )<<1 ) )
bits64 261 arch/arm/nwfpe/softfloat.h return (a == b) || ((bits64) ((a | b) << 1) == 0);
bits64 271 arch/arm/nwfpe/softfloat.h return aSign && ((bits64) ((a | b) << 1) != 0);
bits64 71 arch/sh/kernel/cpu/sh4/softfloat.c bits64 extractFloat64Frac(float64 a);
bits64 77 arch/sh/kernel/cpu/sh4/softfloat.c float64 packFloat64(flag zSign, int16 zExp, bits64 zSig);
bits64 78 arch/sh/kernel/cpu/sh4/softfloat.c void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr);
bits64 90 arch/sh/kernel/cpu/sh4/softfloat.c void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
bits64 91 arch/sh/kernel/cpu/sh4/softfloat.c bits64 * z1Ptr);
bits64 92 arch/sh/kernel/cpu/sh4/softfloat.c void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
bits64 93 arch/sh/kernel/cpu/sh4/softfloat.c bits64 * z1Ptr);
bits64 94 arch/sh/kernel/cpu/sh4/softfloat.c void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr);
bits64 97 arch/sh/kernel/cpu/sh4/softfloat.c static int8 countLeadingZeros64(bits64 a);
bits64 99 arch/sh/kernel/cpu/sh4/softfloat.c bits64 zSig);
bits64 105 arch/sh/kernel/cpu/sh4/softfloat.c static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig);
bits64 108 arch/sh/kernel/cpu/sh4/softfloat.c static void normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr,
bits64 109 arch/sh/kernel/cpu/sh4/softfloat.c bits64 * zSigPtr);
bits64 110 arch/sh/kernel/cpu/sh4/softfloat.c static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b);
bits64 114 arch/sh/kernel/cpu/sh4/softfloat.c bits64 extractFloat64Frac(float64 a)
bits64 144 arch/sh/kernel/cpu/sh4/softfloat.c float64 packFloat64(flag zSign, int16 zExp, bits64 zSig)
bits64 146 arch/sh/kernel/cpu/sh4/softfloat.c return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig;
bits64 149 arch/sh/kernel/cpu/sh4/softfloat.c void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr)
bits64 151 arch/sh/kernel/cpu/sh4/softfloat.c bits64 z;
bits64 199 arch/sh/kernel/cpu/sh4/softfloat.c static int8 countLeadingZeros64(bits64 a)
bits64 204 arch/sh/kernel/cpu/sh4/softfloat.c if (a < ((bits64) 1) << 32) {
bits64 214 arch/sh/kernel/cpu/sh4/softfloat.c static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
bits64 227 arch/sh/kernel/cpu/sh4/softfloat.c bits64 aSig, bSig, zSig;
bits64 288 arch/sh/kernel/cpu/sh4/softfloat.c bits64 aSig, bSig, zSig;
bits64 411 arch/sh/kernel/cpu/sh4/softfloat.c static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
bits64 629 arch/sh/kernel/cpu/sh4/softfloat.c normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, bits64 * zSigPtr)
bits64 638 arch/sh/kernel/cpu/sh4/softfloat.c void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
bits64 639 arch/sh/kernel/cpu/sh4/softfloat.c bits64 * z1Ptr)
bits64 641 arch/sh/kernel/cpu/sh4/softfloat.c bits64 z1;
bits64 649 arch/sh/kernel/cpu/sh4/softfloat.c sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
bits64 650 arch/sh/kernel/cpu/sh4/softfloat.c bits64 * z1Ptr)
bits64 656 arch/sh/kernel/cpu/sh4/softfloat.c static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b)
bits64 658 arch/sh/kernel/cpu/sh4/softfloat.c bits64 b0, b1;
bits64 659 arch/sh/kernel/cpu/sh4/softfloat.c bits64 rem0, rem1, term0, term1;
bits64 660 arch/sh/kernel/cpu/sh4/softfloat.c bits64 z, tmp;
bits64 682 arch/sh/kernel/cpu/sh4/softfloat.c void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr)
bits64 685 arch/sh/kernel/cpu/sh4/softfloat.c bits64 z0, zMiddleA, zMiddleB, z1;
bits64 691 arch/sh/kernel/cpu/sh4/softfloat.c z1 = ((bits64) aLow) * bLow;
bits64 692 arch/sh/kernel/cpu/sh4/softfloat.c zMiddleA = ((bits64) aLow) * bHigh;
bits64 693 arch/sh/kernel/cpu/sh4/softfloat.c zMiddleB = ((bits64) aHigh) * bLow;
bits64 694 arch/sh/kernel/cpu/sh4/softfloat.c z0 = ((bits64) aHigh) * bHigh;
bits64 696 arch/sh/kernel/cpu/sh4/softfloat.c z0 += (((bits64) (zMiddleA < zMiddleB)) << 32) + (zMiddleA >> 32);
bits64 720 arch/sh/kernel/cpu/sh4/softfloat.c bits64 aSig, bSig, zSig;
bits64 721 arch/sh/kernel/cpu/sh4/softfloat.c bits64 rem0, rem1;
bits64 722 arch/sh/kernel/cpu/sh4/softfloat.c bits64 term0, term1;
bits64 814 arch/sh/kernel/cpu/sh4/softfloat.c zSig = (((bits64) aSig) << 32);
bits64 818 arch/sh/kernel/cpu/sh4/softfloat.c zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32);
bits64 916 arch/sh/kernel/cpu/sh4/softfloat.c bits64 aSig;
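
The hits at arch/arm/nwfpe/softfloat.c:361 and arch/sh/kernel/cpu/sh4/softfloat.c:146 are the body of packFloat64, which assembles a raw IEEE 754 double from a sign flag, a biased exponent, and a significand. A minimal standalone sketch of that idiom follows; uint64_t stands in for the kernel's bits64 typedef, and the function name and main() driver are illustrative, not from the source:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t bits64;   /* stand-in for the kernel's bits64 typedef */
typedef int flag;

/* The packFloat64 idiom from the listing: sign in bit 63, 11-bit biased
 * exponent in bits 62..52, 52-bit fraction below.  Using + rather than |
 * lets a significand that rounded up past 2^52 carry naturally into the
 * exponent field. */
static bits64 pack_float64(flag zSign, int16_t zExp, bits64 zSig)
{
    return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig;
}

int main(void)
{
    /* 1.0: sign 0, biased exponent 0x3FF, zero fraction */
    printf("0x%016llx\n",
           (unsigned long long) pack_float64(0, 0x3FF, 0));   /* 0x3ff0000000000000 */
    return 0;
}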
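
The comparison hits (softfloat.c:2403 and :2473, softfloat.h:261 and :271) all rely on the (bits64) ((a | b) << 1) idiom: shifting the OR of the two bit patterns left by one discards both sign bits, so the expression is zero exactly when a and b are zeros of either sign. This is what makes +0 and -0 compare equal. A sketch of that test in isolation, with the NaN handling the real routines perform first deliberately omitted:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t bits64;
typedef bits64 float64;   /* raw IEEE 754 bit pattern, as in nwfpe */

/* Equality idiom from softfloat.c:2403 / softfloat.h:261: identical bit
 * patterns are equal, and (a | b) << 1 drops the sign bits so +0 and -0
 * also compare equal.  (The real float64_eq first screens out NaNs.) */
static int float64_eq_sketch(float64 a, float64 b)
{
    return (a == b) || ((bits64) ((a | b) << 1) == 0);
}

int main(void)
{
    float64 pzero = 0x0000000000000000ULL;   /* +0.0 */
    float64 nzero = 0x8000000000000000ULL;   /* -0.0 */
    printf("+0 == -0: %d\n", float64_eq_sketch(pzero, nzero));   /* prints 1 */
    return 0;
}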
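
The sh4 hits at lines 682 through 696 belong to mul64To128, which builds a full 64x64 -> 128-bit product from four 32-bit partial products. The sketch below reconstructs the routine around the quoted statements; the carry-propagation lines between them (the zMiddleA += zMiddleB step, the shift, and the low-half carry into z0) are filled in from the standard SoftFloat scheme and should be read as an assumption:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t bits64;
typedef uint32_t bits32;

/* 64x64 -> 128-bit multiply via four 32-bit partial products.  The
 * (zMiddleA < zMiddleB) comparison detects wraparound of the middle-term
 * sum: unsigned addition overflowed iff the result is smaller than an
 * operand. */
static void mul64_to_128(bits64 a, bits64 b, bits64 *z0Ptr, bits64 *z1Ptr)
{
    bits32 aHigh = a >> 32, aLow = (bits32) a;
    bits32 bHigh = b >> 32, bLow = (bits32) b;
    bits64 z0, zMiddleA, zMiddleB, z1;

    z1 = ((bits64) aLow) * bLow;
    zMiddleA = ((bits64) aLow) * bHigh;
    zMiddleB = ((bits64) aHigh) * bLow;
    z0 = ((bits64) aHigh) * bHigh;
    zMiddleA += zMiddleB;                 /* may wrap around 2^64 */
    z0 += (((bits64) (zMiddleA < zMiddleB)) << 32) + (zMiddleA >> 32);
    zMiddleA <<= 32;
    z1 += zMiddleA;
    z0 += (z1 < zMiddleA);                /* carry out of the low half */
    *z0Ptr = z0;
    *z1Ptr = z1;
}

int main(void)
{
    bits64 hi, lo;
    mul64_to_128(UINT64_MAX, UINT64_MAX, &hi, &lo);
    /* (2^64 - 1)^2: expect fffffffffffffffe 0000000000000001 */
    printf("%016llx %016llx\n",
           (unsigned long long) hi, (unsigned long long) lo);
    return 0;
}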