Diffstat (limited to 'fpu/softfloat.c')
 fpu/softfloat.c | 68
 1 file changed, 40 insertions(+), 28 deletions(-)
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 59ca356d0e..46ae206172 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -1112,19 +1112,38 @@ static FloatParts div_floats(FloatParts a, FloatParts b, float_status *s)
bool sign = a.sign ^ b.sign;
if (a.cls == float_class_normal && b.cls == float_class_normal) {
- uint64_t temp_lo, temp_hi;
+ uint64_t n0, n1, q, r;
int exp = a.exp - b.exp;
+
+ /*
+ * We want a 2*N / N-bit division to produce exactly an N-bit
+ * result, so that we do not lose any precision and so that we
+ * do not have to renormalize afterward. If A.frac < B.frac,
+ * then division would produce an (N-1)-bit result; shift A left
+ * by one to produce an N-bit result, and decrement the
+ * exponent to match.
+ *
+ * The udiv_qrnnd algorithm that we're using requires normalization,
+ * i.e. the msb of the denominator must be set. Since we know that
+ * DECOMPOSED_BINARY_POINT is msb-1, the inputs must be shifted left
+ * by one (more), and the remainder must be shifted right by one.
+ */
if (a.frac < b.frac) {
exp -= 1;
- shortShift128Left(0, a.frac, DECOMPOSED_BINARY_POINT + 1,
- &temp_hi, &temp_lo);
+ shift128Left(0, a.frac, DECOMPOSED_BINARY_POINT + 2, &n1, &n0);
} else {
- shortShift128Left(0, a.frac, DECOMPOSED_BINARY_POINT,
- &temp_hi, &temp_lo);
+ shift128Left(0, a.frac, DECOMPOSED_BINARY_POINT + 1, &n1, &n0);
}
- /* LSB of quot is set if inexact which roundandpack will use
- * to set flags. Yet again we re-use a for the result */
- a.frac = div128To64(temp_lo, temp_hi, b.frac);
+ q = udiv_qrnnd(&r, n1, n0, b.frac << 1);
+
+ /*
+ * Set lsb if there is a remainder, to set inexact.
+ * As mentioned above, to find the actual value of the remainder we
+ * would need to shift right, but (1) we are only concerned about
+ * non-zero-ness, and (2) the remainder will always be even because
+ * both inputs to the division primitive are even.
+ */
+ a.frac = q | (r != 0);
a.sign = sign;
a.exp = exp;
return a;
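
For illustration only (not part of the patch): the pre-shift and sticky-lsb logic above can be modelled with a 128-bit integer type standing in for shift128Left() and udiv_qrnnd(). The BINARY_POINT value of 62 (msb - 1 of the 64-bit fraction) is an assumption here, as is the use of the GCC/Clang __uint128_t extension.

#include <stdint.h>

/* Sketch of the quotient path in div_floats(): pre-shift the numerator,
 * normalize the divisor so its msb is set, and fold any non-zero
 * remainder into the lsb of the quotient so that rounding later raises
 * "inexact". */
#define BINARY_POINT 62   /* assumed value of DECOMPOSED_BINARY_POINT */

static uint64_t div_frac_sketch(uint64_t a_frac, uint64_t b_frac, int *exp)
{
    unsigned shift = BINARY_POINT + 1;

    if (a_frac < b_frac) {
        *exp -= 1;          /* quotient would otherwise be only N-1 bits */
        shift += 1;
    }
    {
        __uint128_t n = (__uint128_t)a_frac << shift;  /* 2*N-bit numerator */
        uint64_t d = b_frac << 1;        /* divisor with its msb set */
        uint64_t q = (uint64_t)(n / d);
        uint64_t r = (uint64_t)(n % d);

        return q | (r != 0);             /* lsb records a non-zero remainder */
    }
}
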
@@ -1409,13 +1428,6 @@ float64 float64_round_to_int(float64 a, float_status *s)
return float64_round_pack_canonical(pr, s);
}
-float64 float64_trunc_to_int(float64 a, float_status *s)
-{
- FloatParts pa = float64_unpack_canonical(a, s);
- FloatParts pr = round_to_int(pa, float_round_to_zero, 0, s);
- return float64_round_pack_canonical(pr, s);
-}
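
The removed helper was only a thin wrapper around round_to_int() with round-to-zero; a caller that still needs that behaviour can get the same result from the remaining float64_round_to_int() entry point by switching the rounding mode in the float_status. A minimal sketch, assuming the usual get_/set_float_rounding_mode() accessors; the save/restore is not part of this patch:

static float64 float64_trunc_sketch(float64 a, float_status *s)
{
    /* Force truncation via the generic rounding entry point. */
    int old_mode = get_float_rounding_mode(s);
    float64 r;

    set_float_rounding_mode(float_round_to_zero, s);
    r = float64_round_to_int(a, s);
    set_float_rounding_mode(old_mode, s);
    return r;
}
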
-
/*
* Returns the result of converting the floating-point value `a' to
* the two's complement integer format. The conversion is performed
@@ -2690,7 +2702,7 @@ static void
{
int8_t shiftCount;
- shiftCount = countLeadingZeros32( aSig ) - 8;
+ shiftCount = clz32(aSig) - 8;
*zSigPtr = aSig<<shiftCount;
*zExpPtr = 1 - shiftCount;
@@ -2798,7 +2810,7 @@ static float32
{
int8_t shiftCount;
- shiftCount = countLeadingZeros32( zSig ) - 1;
+ shiftCount = clz32(zSig) - 1;
return roundAndPackFloat32(zSign, zExp - shiftCount, zSig<<shiftCount,
status);
@@ -2831,7 +2843,7 @@ static void
{
int8_t shiftCount;
- shiftCount = countLeadingZeros64( aSig ) - 11;
+ shiftCount = clz64(aSig) - 11;
*zSigPtr = aSig<<shiftCount;
*zExpPtr = 1 - shiftCount;
@@ -2969,7 +2981,7 @@ static float64
{
int8_t shiftCount;
- shiftCount = countLeadingZeros64( zSig ) - 1;
+ shiftCount = clz64(zSig) - 1;
return roundAndPackFloat64(zSign, zExp - shiftCount, zSig<<shiftCount,
status);
@@ -2987,7 +2999,7 @@ void normalizeFloatx80Subnormal(uint64_t aSig, int32_t *zExpPtr,
{
int8_t shiftCount;
- shiftCount = countLeadingZeros64( aSig );
+ shiftCount = clz64(aSig);
*zSigPtr = aSig<<shiftCount;
*zExpPtr = 1 - shiftCount;
}
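
As an aside (illustration, not part of the patch): every countLeadingZeros32()/countLeadingZeros64() call site replaced throughout this patch follows the same normalization pattern, now expressed with the host clz32()/clz64() helpers. A self-contained sketch using the compiler builtin as a stand-in:

#include <stdint.h>

/* Normalize a non-zero subnormal significand: shift it up until the msb
 * is set and compensate in the exponent, mirroring the code above. */
static void normalize_subnormal_sketch(uint64_t sig, int32_t *exp_out,
                                       uint64_t *sig_out)
{
    int shift = __builtin_clzll(sig);  /* stand-in for clz64(); sig != 0 */

    *sig_out = sig << shift;
    *exp_out = 1 - shift;
}
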
@@ -3226,7 +3238,7 @@ floatx80 normalizeRoundAndPackFloatx80(int8_t roundingPrecision,
zSig1 = 0;
zExp -= 64;
}
- shiftCount = countLeadingZeros64( zSig0 );
+ shiftCount = clz64(zSig0);
shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 );
zExp -= shiftCount;
return roundAndPackFloatx80(roundingPrecision, zSign, zExp,
@@ -3303,7 +3315,7 @@ static void
int8_t shiftCount;
if ( aSig0 == 0 ) {
- shiftCount = countLeadingZeros64( aSig1 ) - 15;
+ shiftCount = clz64(aSig1) - 15;
if ( shiftCount < 0 ) {
*zSig0Ptr = aSig1>>( - shiftCount );
*zSig1Ptr = aSig1<<( shiftCount & 63 );
@@ -3315,7 +3327,7 @@ static void
*zExpPtr = - shiftCount - 63;
}
else {
- shiftCount = countLeadingZeros64( aSig0 ) - 15;
+ shiftCount = clz64(aSig0) - 15;
shortShift128Left( aSig0, aSig1, shiftCount, zSig0Ptr, zSig1Ptr );
*zExpPtr = 1 - shiftCount;
}
@@ -3504,7 +3516,7 @@ static float128 normalizeRoundAndPackFloat128(flag zSign, int32_t zExp,
zSig1 = 0;
zExp -= 64;
}
- shiftCount = countLeadingZeros64( zSig0 ) - 15;
+ shiftCount = clz64(zSig0) - 15;
if ( 0 <= shiftCount ) {
zSig2 = 0;
shortShift128Left( zSig0, zSig1, shiftCount, &zSig0, &zSig1 );
@@ -3536,7 +3548,7 @@ floatx80 int32_to_floatx80(int32_t a, float_status *status)
if ( a == 0 ) return packFloatx80( 0, 0, 0 );
zSign = ( a < 0 );
absA = zSign ? - a : a;
- shiftCount = countLeadingZeros32( absA ) + 32;
+ shiftCount = clz32(absA) + 32;
zSig = absA;
return packFloatx80( zSign, 0x403E - shiftCount, zSig<<shiftCount );
@@ -3558,7 +3570,7 @@ float128 int32_to_float128(int32_t a, float_status *status)
if ( a == 0 ) return packFloat128( 0, 0, 0, 0 );
zSign = ( a < 0 );
absA = zSign ? - a : a;
- shiftCount = countLeadingZeros32( absA ) + 17;
+ shiftCount = clz32(absA) + 17;
zSig0 = absA;
return packFloat128( zSign, 0x402E - shiftCount, zSig0<<shiftCount, 0 );
@@ -3580,7 +3592,7 @@ floatx80 int64_to_floatx80(int64_t a, float_status *status)
if ( a == 0 ) return packFloatx80( 0, 0, 0 );
zSign = ( a < 0 );
absA = zSign ? - a : a;
- shiftCount = countLeadingZeros64( absA );
+ shiftCount = clz64(absA);
return packFloatx80( zSign, 0x403E - shiftCount, absA<<shiftCount );
}
@@ -3602,7 +3614,7 @@ float128 int64_to_float128(int64_t a, float_status *status)
if ( a == 0 ) return packFloat128( 0, 0, 0, 0 );
zSign = ( a < 0 );
absA = zSign ? - a : a;
- shiftCount = countLeadingZeros64( absA ) + 49;
+ shiftCount = clz64(absA) + 49;
zExp = 0x406E - shiftCount;
if ( 64 <= shiftCount ) {
zSig1 = 0;