bSig             1032 arch/arm/nwfpe/softfloat.c     bits32 aSig, bSig, zSig;
bSig             1037 arch/arm/nwfpe/softfloat.c     bSig = extractFloat32Frac( b );
bSig             1041 arch/arm/nwfpe/softfloat.c     bSig <<= 6;
bSig             1051 arch/arm/nwfpe/softfloat.c             bSig |= 0x20000000;
bSig             1053 arch/arm/nwfpe/softfloat.c         shift32RightJamming( bSig, expDiff, &bSig );
bSig             1058 arch/arm/nwfpe/softfloat.c             if ( bSig ) return propagateFloat32NaN( a, b );
bSig             1072 arch/arm/nwfpe/softfloat.c             if ( aSig | bSig ) return propagateFloat32NaN( a, b );
bSig             1075 arch/arm/nwfpe/softfloat.c         if ( aExp == 0 ) return packFloat32( zSign, 0, ( aSig + bSig )>>6 );
bSig             1076 arch/arm/nwfpe/softfloat.c         zSig = 0x40000000 + aSig + bSig;
bSig             1081 arch/arm/nwfpe/softfloat.c     zSig = ( aSig + bSig )<<1;
bSig             1084 arch/arm/nwfpe/softfloat.c         zSig = aSig + bSig;
bSig             1104 arch/arm/nwfpe/softfloat.c     bits32 aSig, bSig, zSig;
bSig             1109 arch/arm/nwfpe/softfloat.c     bSig = extractFloat32Frac( b );
bSig             1113 arch/arm/nwfpe/softfloat.c     bSig <<= 7;
bSig             1117 arch/arm/nwfpe/softfloat.c         if ( aSig | bSig ) return propagateFloat32NaN( a, b );
bSig             1125 arch/arm/nwfpe/softfloat.c     if ( bSig < aSig ) goto aBigger;
bSig             1126 arch/arm/nwfpe/softfloat.c     if ( aSig < bSig ) goto bBigger;
bSig             1130 arch/arm/nwfpe/softfloat.c         if ( bSig ) return propagateFloat32NaN( a, b );
bSig             1140 arch/arm/nwfpe/softfloat.c     bSig |= 0x40000000;
bSig             1142 arch/arm/nwfpe/softfloat.c     zSig = bSig - aSig;
bSig             1155 arch/arm/nwfpe/softfloat.c         bSig |= 0x40000000;
bSig             1157 arch/arm/nwfpe/softfloat.c     shift32RightJamming( bSig, expDiff, &bSig );
bSig             1160 arch/arm/nwfpe/softfloat.c     zSig = aSig - bSig;
bSig             1223 arch/arm/nwfpe/softfloat.c     bits32 aSig, bSig;
bSig             1230 arch/arm/nwfpe/softfloat.c     bSig = extractFloat32Frac( b );
bSig             1235 arch/arm/nwfpe/softfloat.c         if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) {
bSig             1238 arch/arm/nwfpe/softfloat.c         if ( ( bExp | bSig ) == 0 ) {
bSig             1245 arch/arm/nwfpe/softfloat.c         if ( bSig ) return propagateFloat32NaN( a, b );
bSig             1257 arch/arm/nwfpe/softfloat.c         if ( bSig == 0 ) return packFloat32( zSign, 0, 0 );
bSig             1258 arch/arm/nwfpe/softfloat.c         normalizeFloat32Subnormal( bSig, &bExp, &bSig );
bSig             1262 arch/arm/nwfpe/softfloat.c     bSig = ( bSig | 0x00800000 )<<8;
bSig             1263 arch/arm/nwfpe/softfloat.c     shift64RightJamming( ( (bits64) aSig ) * bSig, 32, &zSig64 );
bSig             1284 arch/arm/nwfpe/softfloat.c     bits32 aSig, bSig, zSig;
bSig             1289 arch/arm/nwfpe/softfloat.c     bSig = extractFloat32Frac( b );
bSig             1296 arch/arm/nwfpe/softfloat.c             if ( bSig ) return propagateFloat32NaN( a, b );
bSig             1303 arch/arm/nwfpe/softfloat.c         if ( bSig ) return propagateFloat32NaN( a, b );
bSig             1307 arch/arm/nwfpe/softfloat.c         if ( bSig == 0 ) {
bSig             1315 arch/arm/nwfpe/softfloat.c         normalizeFloat32Subnormal( bSig, &bExp, &bSig );
bSig             1323 arch/arm/nwfpe/softfloat.c     bSig = ( bSig | 0x00800000 )<<8;
bSig             1324 arch/arm/nwfpe/softfloat.c     if ( bSig <= ( aSig + aSig ) ) {
bSig             1330 arch/arm/nwfpe/softfloat.c         do_div( tmp, bSig );
bSig             1334 arch/arm/nwfpe/softfloat.c         zSig |= ( ( (bits64) bSig ) * zSig != ( (bits64) aSig )<<32 );
bSig             1351 arch/arm/nwfpe/softfloat.c     bits32 aSig, bSig;
bSig             1360 arch/arm/nwfpe/softfloat.c     bSig = extractFloat32Frac( b );
bSig             1364 arch/arm/nwfpe/softfloat.c         if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) {
bSig             1371 arch/arm/nwfpe/softfloat.c         if ( bSig ) return propagateFloat32NaN( a, b );
bSig             1375 arch/arm/nwfpe/softfloat.c         if ( bSig == 0 ) {
bSig             1379 arch/arm/nwfpe/softfloat.c         normalizeFloat32Subnormal( bSig, &bExp, &bSig );
bSig             1387 arch/arm/nwfpe/softfloat.c     bSig |= 0x00800000;
bSig             1390 arch/arm/nwfpe/softfloat.c         bSig <<= 8;
bSig             1395 arch/arm/nwfpe/softfloat.c         q = ( bSig <= aSig );
bSig             1396 arch/arm/nwfpe/softfloat.c         if ( q ) aSig -= bSig;
bSig             1399 arch/arm/nwfpe/softfloat.c             do_div( tmp, bSig );
bSig             1402 arch/arm/nwfpe/softfloat.c             bSig >>= 2;
bSig             1403 arch/arm/nwfpe/softfloat.c             aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
bSig             1407 arch/arm/nwfpe/softfloat.c             bSig >>= 2;
bSig             1411 arch/arm/nwfpe/softfloat.c         if ( bSig <= aSig ) aSig -= bSig;
bSig             1413 arch/arm/nwfpe/softfloat.c         bSig64 = ( (bits64) bSig )<<40;
bSig             1418 arch/arm/nwfpe/softfloat.c             aSig64 = - ( ( bSig * q64 )<<38 );
bSig             1425 arch/arm/nwfpe/softfloat.c         bSig <<= 6;
bSig             1426 arch/arm/nwfpe/softfloat.c         aSig = ( ( aSig64>>33 )<<( expDiff - 1 ) ) - bSig * q;
bSig             1431 arch/arm/nwfpe/softfloat.c         aSig -= bSig;
bSig             1928 arch/arm/nwfpe/softfloat.c     bits64 aSig, bSig, zSig;
bSig             1933 arch/arm/nwfpe/softfloat.c     bSig = extractFloat64Frac( b );
bSig             1937 arch/arm/nwfpe/softfloat.c     bSig <<= 9;
bSig             1947 arch/arm/nwfpe/softfloat.c             bSig |= LIT64( 0x2000000000000000 );
bSig             1949 arch/arm/nwfpe/softfloat.c         shift64RightJamming( bSig, expDiff, &bSig );
bSig             1954 arch/arm/nwfpe/softfloat.c             if ( bSig ) return propagateFloat64NaN( a, b );
bSig             1968 arch/arm/nwfpe/softfloat.c             if ( aSig | bSig ) return propagateFloat64NaN( a, b );
bSig             1971 arch/arm/nwfpe/softfloat.c         if ( aExp == 0 ) return packFloat64( zSign, 0, ( aSig + bSig )>>9 );
bSig             1972 arch/arm/nwfpe/softfloat.c         zSig = LIT64( 0x4000000000000000 ) + aSig + bSig;
bSig             1977 arch/arm/nwfpe/softfloat.c     zSig = ( aSig + bSig )<<1;
bSig             1980 arch/arm/nwfpe/softfloat.c         zSig = aSig + bSig;
bSig             2000 arch/arm/nwfpe/softfloat.c     bits64 aSig, bSig, zSig;
bSig             2005 arch/arm/nwfpe/softfloat.c     bSig = extractFloat64Frac( b );
bSig             2009 arch/arm/nwfpe/softfloat.c     bSig <<= 10;
bSig             2013 arch/arm/nwfpe/softfloat.c         if ( aSig | bSig ) return propagateFloat64NaN( a, b );
bSig             2021 arch/arm/nwfpe/softfloat.c     if ( bSig < aSig ) goto aBigger;
bSig             2022 arch/arm/nwfpe/softfloat.c     if ( aSig < bSig ) goto bBigger;
bSig             2026 arch/arm/nwfpe/softfloat.c         if ( bSig ) return propagateFloat64NaN( a, b );
bSig             2036 arch/arm/nwfpe/softfloat.c     bSig |= LIT64( 0x4000000000000000 );
bSig             2038 arch/arm/nwfpe/softfloat.c     zSig = bSig - aSig;
bSig             2051 arch/arm/nwfpe/softfloat.c         bSig |= LIT64( 0x4000000000000000 );
bSig             2053 arch/arm/nwfpe/softfloat.c     shift64RightJamming( bSig, expDiff, &bSig );
bSig             2056 arch/arm/nwfpe/softfloat.c     zSig = aSig - bSig;
bSig             2119 arch/arm/nwfpe/softfloat.c     bits64 aSig, bSig, zSig0, zSig1;
bSig             2124 arch/arm/nwfpe/softfloat.c     bSig = extractFloat64Frac( b );
bSig             2129 arch/arm/nwfpe/softfloat.c         if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) {
bSig             2132 arch/arm/nwfpe/softfloat.c         if ( ( bExp | bSig ) == 0 ) {
bSig             2139 arch/arm/nwfpe/softfloat.c         if ( bSig ) return propagateFloat64NaN( a, b );
bSig             2151 arch/arm/nwfpe/softfloat.c         if ( bSig == 0 ) return packFloat64( zSign, 0, 0 );
bSig             2152 arch/arm/nwfpe/softfloat.c         normalizeFloat64Subnormal( bSig, &bExp, &bSig );
bSig             2156 arch/arm/nwfpe/softfloat.c     bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11;
bSig             2157 arch/arm/nwfpe/softfloat.c     mul64To128( aSig, bSig, &zSig0, &zSig1 );
bSig             2178 arch/arm/nwfpe/softfloat.c     bits64 aSig, bSig, zSig;
bSig             2185 arch/arm/nwfpe/softfloat.c     bSig = extractFloat64Frac( b );
bSig             2192 arch/arm/nwfpe/softfloat.c             if ( bSig ) return propagateFloat64NaN( a, b );
bSig             2199 arch/arm/nwfpe/softfloat.c         if ( bSig ) return propagateFloat64NaN( a, b );
bSig             2203 arch/arm/nwfpe/softfloat.c         if ( bSig == 0 ) {
bSig             2211 arch/arm/nwfpe/softfloat.c         normalizeFloat64Subnormal( bSig, &bExp, &bSig );
bSig             2219 arch/arm/nwfpe/softfloat.c     bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11;
bSig             2220 arch/arm/nwfpe/softfloat.c     if ( bSig <= ( aSig + aSig ) ) {
bSig             2224 arch/arm/nwfpe/softfloat.c     zSig = estimateDiv128To64( aSig, 0, bSig );
bSig             2226 arch/arm/nwfpe/softfloat.c         mul64To128( bSig, zSig, &term0, &term1 );
bSig             2230 arch/arm/nwfpe/softfloat.c             add128( rem0, rem1, 0, bSig, &rem0, &rem1 );
bSig             2249 arch/arm/nwfpe/softfloat.c     bits64 aSig, bSig;
bSig             2256 arch/arm/nwfpe/softfloat.c     bSig = extractFloat64Frac( b );
bSig             2260 arch/arm/nwfpe/softfloat.c         if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) {
bSig             2267 arch/arm/nwfpe/softfloat.c         if ( bSig ) return propagateFloat64NaN( a, b );
bSig             2271 arch/arm/nwfpe/softfloat.c         if ( bSig == 0 ) {
bSig             2275 arch/arm/nwfpe/softfloat.c         normalizeFloat64Subnormal( bSig, &bExp, &bSig );
bSig             2283 arch/arm/nwfpe/softfloat.c     bSig = ( bSig | LIT64( 0x0010000000000000 ) )<<11;
bSig             2288 arch/arm/nwfpe/softfloat.c     q = ( bSig <= aSig );
bSig             2289 arch/arm/nwfpe/softfloat.c     if ( q ) aSig -= bSig;
bSig             2292 arch/arm/nwfpe/softfloat.c         q = estimateDiv128To64( aSig, 0, bSig );
bSig             2294 arch/arm/nwfpe/softfloat.c         aSig = - ( ( bSig>>2 ) * q );
bSig             2299 arch/arm/nwfpe/softfloat.c         q = estimateDiv128To64( aSig, 0, bSig );
bSig             2302 arch/arm/nwfpe/softfloat.c         bSig >>= 2;
bSig             2303 arch/arm/nwfpe/softfloat.c         aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
bSig             2307 arch/arm/nwfpe/softfloat.c         bSig >>= 2;
bSig             2312 arch/arm/nwfpe/softfloat.c         aSig -= bSig;
bSig             2748 arch/arm/nwfpe/softfloat.c     bits64 aSig, bSig, zSig0, zSig1;
bSig             2753 arch/arm/nwfpe/softfloat.c     bSig = extractFloatx80Frac( b );
bSig             2762 arch/arm/nwfpe/softfloat.c         shift64ExtraRightJamming( bSig, 0, expDiff, &bSig, &zSig1 );
bSig             2767 arch/arm/nwfpe/softfloat.c             if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
bSig             2776 arch/arm/nwfpe/softfloat.c             if ( (bits64) ( ( aSig | bSig )<<1 ) ) {
bSig             2782 arch/arm/nwfpe/softfloat.c         zSig0 = aSig + bSig;
bSig             2791 arch/arm/nwfpe/softfloat.c     zSig0 = aSig + bSig;
bSig             2817 arch/arm/nwfpe/softfloat.c     bits64 aSig, bSig, zSig0, zSig1;
bSig             2823 arch/arm/nwfpe/softfloat.c     bSig = extractFloatx80Frac( b );
bSig             2829 arch/arm/nwfpe/softfloat.c         if ( (bits64) ( ( aSig | bSig )<<1 ) ) {
bSig             2843 arch/arm/nwfpe/softfloat.c     if ( bSig < aSig ) goto aBigger;
bSig             2844 arch/arm/nwfpe/softfloat.c     if ( aSig < bSig ) goto bBigger;
bSig             2848 arch/arm/nwfpe/softfloat.c         if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
bSig             2854 arch/arm/nwfpe/softfloat.c     sub128( bSig, 0, aSig, zSig1, &zSig0, &zSig1 );
bSig             2864 arch/arm/nwfpe/softfloat.c     shift128RightJamming( bSig, 0, expDiff, &bSig, &zSig1 );
bSig             2866 arch/arm/nwfpe/softfloat.c     sub128( aSig, 0, bSig, zSig1, &zSig0, &zSig1 );
bSig             2930 arch/arm/nwfpe/softfloat.c     bits64 aSig, bSig, zSig0, zSig1;
bSig             2936 arch/arm/nwfpe/softfloat.c     bSig = extractFloatx80Frac( b );
bSig             2942 arch/arm/nwfpe/softfloat.c              || ( ( bExp == 0x7FFF ) && (bits64) ( bSig<<1 ) ) ) {
bSig             2945 arch/arm/nwfpe/softfloat.c         if ( ( bExp | bSig ) == 0 ) goto invalid;
bSig             2949 arch/arm/nwfpe/softfloat.c         if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
bSig             2965 arch/arm/nwfpe/softfloat.c         if ( bSig == 0 ) return packFloatx80( zSign, 0, 0 );
bSig             2966 arch/arm/nwfpe/softfloat.c         normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
bSig             2969 arch/arm/nwfpe/softfloat.c     mul64To128( aSig, bSig, &zSig0, &zSig1 );
bSig             2991 arch/arm/nwfpe/softfloat.c     bits64 aSig, bSig, zSig0, zSig1;
bSig             2998 arch/arm/nwfpe/softfloat.c     bSig = extractFloatx80Frac( b );
bSig             3005 arch/arm/nwfpe/softfloat.c             if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
bSig             3011 arch/arm/nwfpe/softfloat.c         if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
bSig             3015 arch/arm/nwfpe/softfloat.c         if ( bSig == 0 ) {
bSig             3027 arch/arm/nwfpe/softfloat.c         normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
bSig             3035 arch/arm/nwfpe/softfloat.c     if ( bSig <= aSig ) {
bSig             3039 arch/arm/nwfpe/softfloat.c     zSig0 = estimateDiv128To64( aSig, rem1, bSig );
bSig             3040 arch/arm/nwfpe/softfloat.c     mul64To128( bSig, zSig0, &term0, &term1 );
bSig             3044 arch/arm/nwfpe/softfloat.c         add128( rem0, rem1, 0, bSig, &rem0, &rem1 );
bSig             3046 arch/arm/nwfpe/softfloat.c     zSig1 = estimateDiv128To64( rem1, 0, bSig );
bSig             3048 arch/arm/nwfpe/softfloat.c         mul64To128( bSig, zSig1, &term1, &term2 );
bSig             3052 arch/arm/nwfpe/softfloat.c             add128( rem1, rem2, 0, bSig, &rem1, &rem2 );
bSig             3073 arch/arm/nwfpe/softfloat.c     bits64 aSig0, aSig1, bSig;
bSig             3080 arch/arm/nwfpe/softfloat.c     bSig = extractFloatx80Frac( b );
bSig             3085 arch/arm/nwfpe/softfloat.c              || ( ( bExp == 0x7FFF ) && (bits64) ( bSig<<1 ) ) ) {
bSig             3091 arch/arm/nwfpe/softfloat.c         if ( (bits64) ( bSig<<1 ) ) return propagateFloatx80NaN( a, b );
bSig             3095 arch/arm/nwfpe/softfloat.c         if ( bSig == 0 ) {
bSig             3103 arch/arm/nwfpe/softfloat.c         normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
bSig             3109 arch/arm/nwfpe/softfloat.c     bSig |= LIT64( 0x8000000000000000 );
bSig             3118 arch/arm/nwfpe/softfloat.c     q = ( bSig <= aSig0 );
bSig             3119 arch/arm/nwfpe/softfloat.c     if ( q ) aSig0 -= bSig;
bSig             3122 arch/arm/nwfpe/softfloat.c         q = estimateDiv128To64( aSig0, aSig1, bSig );
bSig             3124 arch/arm/nwfpe/softfloat.c         mul64To128( bSig, q, &term0, &term1 );
bSig             3131 arch/arm/nwfpe/softfloat.c         q = estimateDiv128To64( aSig0, aSig1, bSig );
bSig             3134 arch/arm/nwfpe/softfloat.c         mul64To128( bSig, q<<( 64 - expDiff ), &term0, &term1 );
bSig             3136 arch/arm/nwfpe/softfloat.c         shortShift128Left( 0, bSig, 64 - expDiff, &term0, &term1 );
bSig             3144 arch/arm/nwfpe/softfloat.c         term0 = bSig;
bSig              227 arch/sh/kernel/cpu/sh4/softfloat.c 	bits64 aSig, bSig, zSig;
bSig              232 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig = extractFloat64Frac(b);
bSig              236 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig <<= 10;
bSig              245 arch/sh/kernel/cpu/sh4/softfloat.c 	if (bSig < aSig)
bSig              247 arch/sh/kernel/cpu/sh4/softfloat.c 	if (aSig < bSig)
bSig              260 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig |= LIT64(0x4000000000000000);
bSig              262 arch/sh/kernel/cpu/sh4/softfloat.c 	zSig = bSig - aSig;
bSig              273 arch/sh/kernel/cpu/sh4/softfloat.c 		bSig |= LIT64(0x4000000000000000);
bSig              275 arch/sh/kernel/cpu/sh4/softfloat.c 	shift64RightJamming(bSig, expDiff, &bSig);
bSig              278 arch/sh/kernel/cpu/sh4/softfloat.c 	zSig = aSig - bSig;
bSig              288 arch/sh/kernel/cpu/sh4/softfloat.c 	bits64 aSig, bSig, zSig;
bSig              293 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig = extractFloat64Frac(b);
bSig              297 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig <<= 9;
bSig              305 arch/sh/kernel/cpu/sh4/softfloat.c 			bSig |= LIT64(0x2000000000000000);
bSig              307 arch/sh/kernel/cpu/sh4/softfloat.c 		shift64RightJamming(bSig, expDiff, &bSig);
bSig              325 arch/sh/kernel/cpu/sh4/softfloat.c 			return packFloat64(zSign, 0, (aSig + bSig) >> 9);
bSig              326 arch/sh/kernel/cpu/sh4/softfloat.c 		zSig = LIT64(0x4000000000000000) + aSig + bSig;
bSig              331 arch/sh/kernel/cpu/sh4/softfloat.c 	zSig = (aSig + bSig) << 1;
bSig              334 arch/sh/kernel/cpu/sh4/softfloat.c 		zSig = aSig + bSig;
bSig              457 arch/sh/kernel/cpu/sh4/softfloat.c 	bits32 aSig, bSig, zSig;
bSig              462 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig = extractFloat32Frac(b);
bSig              466 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig <<= 7;
bSig              475 arch/sh/kernel/cpu/sh4/softfloat.c 	if (bSig < aSig)
bSig              477 arch/sh/kernel/cpu/sh4/softfloat.c 	if (aSig < bSig)
bSig              490 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig |= 0x40000000;
bSig              492 arch/sh/kernel/cpu/sh4/softfloat.c 	zSig = bSig - aSig;
bSig              503 arch/sh/kernel/cpu/sh4/softfloat.c 		bSig |= 0x40000000;
bSig              505 arch/sh/kernel/cpu/sh4/softfloat.c 	shift32RightJamming(bSig, expDiff, &bSig);
bSig              508 arch/sh/kernel/cpu/sh4/softfloat.c 	zSig = aSig - bSig;
bSig              519 arch/sh/kernel/cpu/sh4/softfloat.c 	bits32 aSig, bSig, zSig;
bSig              524 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig = extractFloat32Frac(b);
bSig              528 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig <<= 6;
bSig              536 arch/sh/kernel/cpu/sh4/softfloat.c 			bSig |= 0x20000000;
bSig              538 arch/sh/kernel/cpu/sh4/softfloat.c 		shift32RightJamming(bSig, expDiff, &bSig);
bSig              556 arch/sh/kernel/cpu/sh4/softfloat.c 			return packFloat32(zSign, 0, (aSig + bSig) >> 6);
bSig              557 arch/sh/kernel/cpu/sh4/softfloat.c 		zSig = 0x40000000 + aSig + bSig;
bSig              562 arch/sh/kernel/cpu/sh4/softfloat.c 	zSig = (aSig + bSig) << 1;
bSig              565 arch/sh/kernel/cpu/sh4/softfloat.c 		zSig = aSig + bSig;
bSig              720 arch/sh/kernel/cpu/sh4/softfloat.c 	bits64 aSig, bSig, zSig;
bSig              727 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig = extractFloat64Frac(b);
bSig              740 arch/sh/kernel/cpu/sh4/softfloat.c 		if (bSig == 0) {
bSig              746 arch/sh/kernel/cpu/sh4/softfloat.c 		normalizeFloat64Subnormal(bSig, &bExp, &bSig);
bSig              755 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig = (bSig | LIT64(0x0010000000000000)) << 11;
bSig              756 arch/sh/kernel/cpu/sh4/softfloat.c 	if (bSig <= (aSig + aSig)) {
bSig              760 arch/sh/kernel/cpu/sh4/softfloat.c 	zSig = estimateDiv128To64(aSig, 0, bSig);
bSig              762 arch/sh/kernel/cpu/sh4/softfloat.c 		mul64To128(bSig, zSig, &term0, &term1);
bSig              766 arch/sh/kernel/cpu/sh4/softfloat.c 			add128(rem0, rem1, 0, bSig, &rem0, &rem1);
bSig              778 arch/sh/kernel/cpu/sh4/softfloat.c 	bits32 aSig, bSig;
bSig              784 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig = extractFloat32Frac(b);
bSig              797 arch/sh/kernel/cpu/sh4/softfloat.c 		if (bSig == 0) {
bSig              800 arch/sh/kernel/cpu/sh4/softfloat.c 		normalizeFloat32Subnormal(bSig, &bExp, &bSig);
bSig              809 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig = (bSig | 0x00800000) << 8;
bSig              810 arch/sh/kernel/cpu/sh4/softfloat.c 	if (bSig <= (aSig + aSig)) {
bSig              815 arch/sh/kernel/cpu/sh4/softfloat.c 	do_div(zSig, bSig);
bSig              818 arch/sh/kernel/cpu/sh4/softfloat.c 		zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32);
bSig              828 arch/sh/kernel/cpu/sh4/softfloat.c 	unsigned int aSig, bSig;
bSig              835 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig = extractFloat32Frac(b);
bSig              845 arch/sh/kernel/cpu/sh4/softfloat.c 		if (bSig == 0)
bSig              847 arch/sh/kernel/cpu/sh4/softfloat.c 		normalizeFloat32Subnormal(bSig, &bExp, &bSig);
bSig              849 arch/sh/kernel/cpu/sh4/softfloat.c 	if ((bExp == 0xff && bSig == 0) || (aExp == 0xff && aSig == 0))
bSig              854 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig = (bSig | 0x00800000) << 8;
bSig              855 arch/sh/kernel/cpu/sh4/softfloat.c 	shift64RightJamming(((unsigned long long)aSig) * bSig, 32, &zSig64);
bSig              869 arch/sh/kernel/cpu/sh4/softfloat.c 	unsigned long long int aSig, bSig, zSig0, zSig1;
bSig              874 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig = extractFloat64Frac(b);
bSig              885 arch/sh/kernel/cpu/sh4/softfloat.c 		if (bSig == 0)
bSig              887 arch/sh/kernel/cpu/sh4/softfloat.c 		normalizeFloat64Subnormal(bSig, &bExp, &bSig);
bSig              889 arch/sh/kernel/cpu/sh4/softfloat.c 	if ((aExp == 0x7ff && aSig == 0) || (bExp == 0x7ff && bSig == 0))
bSig              894 arch/sh/kernel/cpu/sh4/softfloat.c 	bSig = (bSig | 0x0010000000000000LL) << 11;
bSig              895 arch/sh/kernel/cpu/sh4/softfloat.c 	mul64To128(aSig, bSig, &zSig0, &zSig1);
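
The hits above all follow the same pattern: extractFloat32Frac()/extractFloat64Frac() pulls the fraction field into bSig, the value is pre-shifted to a working scale (<<6, <<7, <<9, <<10 or <<11), the implicit leading bit is OR-ed back in (0x20000000, 0x40000000, 0x00800000 or their 64-bit equivalents), and shift32RightJamming()/shift64RightJamming() aligns it to the other operand's exponent. Below is a minimal stand-alone sketch of that alignment step for the float32 add case; the local shift32RightJamming() is a simplified re-implementation for illustration and the input values are made up, so it is not the kernel code itself.

    /*
     * Hypothetical stand-alone sketch of the operand-alignment step seen in
     * the addFloat32Sigs() hits above: pre-shift bSig left by 6, restore the
     * implicit leading 1 as 0x20000000, then shift right by the exponent
     * difference while "jamming" any shifted-out bits into the low bit so the
     * later rounding step still sees them.
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t bits32;

    /* Simplified re-implementation of shift32RightJamming() for this sketch. */
    static void shift32RightJamming(bits32 a, int count, bits32 *zPtr)
    {
        bits32 z;

        if (count == 0)
            z = a;
        else if (count < 32)
            z = (a >> count) | ((a << ((-count) & 31)) != 0);
        else
            z = (a != 0);
        *zPtr = z;
    }

    int main(void)
    {
        bits32 b = 0x3f000000;            /* 0.5f as an IEEE single bit pattern */
        bits32 bSig = b & 0x007FFFFF;     /* extractFloat32Frac(b)              */
        int expDiff = 3;                  /* assumed exponent gap for the demo  */

        bSig <<= 6;                       /* working scale used in the listing  */
        bSig |= 0x20000000;               /* implicit leading 1 at that scale   */
        shift32RightJamming(bSig, expDiff, &bSig);

        printf("aligned bSig = 0x%08x\n", bSig);
        return 0;
    }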