Diffstat (limited to 'fpu')
-rw-r--r--   fpu/softfloat-macros.h     | 26
-rw-r--r--   fpu/softfloat-specialize.h |  2
-rw-r--r--   fpu/softfloat.c            | 50
3 files changed, 39 insertions, 39 deletions
diff --git a/fpu/softfloat-macros.h b/fpu/softfloat-macros.h
index 5e030cd8e5..e95b4450fa 100644
--- a/fpu/softfloat-macros.h
+++ b/fpu/softfloat-macros.h
@@ -164,7 +164,7 @@ static inline void
     uint64_t a0, uint64_t a1, int_fast16_t count, uint64_t *z0Ptr, uint64_t *z1Ptr)
 {
     uint64_t z0, z1;
-    int8 negCount = ( - count ) & 63;
+    int8_t negCount = ( - count ) & 63;
 
     if ( count == 0 ) {
         z1 = a1;
@@ -201,7 +201,7 @@ static inline void
     uint64_t a0, uint64_t a1, int_fast16_t count, uint64_t *z0Ptr, uint64_t *z1Ptr)
 {
     uint64_t z0, z1;
-    int8 negCount = ( - count ) & 63;
+    int8_t negCount = ( - count ) & 63;
 
     if ( count == 0 ) {
         z1 = a1;
@@ -236,7 +236,7 @@ static inline void
     uint64_t a0, uint64_t a1, int_fast16_t count, uint64_t *z0Ptr, uint64_t *z1Ptr)
 {
     uint64_t z0, z1;
-    int8 negCount = ( - count ) & 63;
+    int8_t negCount = ( - count ) & 63;
 
     if ( count == 0 ) {
         z1 = a1;
@@ -294,7 +294,7 @@ static inline void
  )
 {
     uint64_t z0, z1, z2;
-    int8 negCount = ( - count ) & 63;
+    int8_t negCount = ( - count ) & 63;
 
     if ( count == 0 ) {
         z2 = a2;
@@ -371,7 +371,7 @@ static inline void
  )
 {
     uint64_t z0, z1, z2;
-    int8 negCount;
+    int8_t negCount;
 
     z2 = a2<<count;
     z1 = a1<<count;
@@ -428,7 +428,7 @@ static inline void
  )
 {
     uint64_t z0, z1, z2;
-    int8 carry0, carry1;
+    int8_t carry0, carry1;
 
     z2 = a2 + b2;
     carry1 = ( z2 < a2 );
@@ -484,7 +484,7 @@ static inline void
  )
 {
     uint64_t z0, z1, z2;
-    int8 borrow0, borrow1;
+    int8_t borrow0, borrow1;
 
     z2 = a2 - b2;
     borrow1 = ( a2 < b2 );
@@ -645,7 +645,7 @@ static uint32_t estimateSqrt32(int_fast16_t aExp, uint32_t a)
         0x0A2D, 0x08AF, 0x075A, 0x0629, 0x051A, 0x0429, 0x0356, 0x029E,
         0x0200, 0x0179, 0x0109, 0x00AF, 0x0068, 0x0034, 0x0012, 0x0002
     };
-    int8 index;
+    int8_t index;
     uint32_t z;
 
     index = ( a>>27 ) & 15;
@@ -669,7 +669,7 @@ static uint32_t estimateSqrt32(int_fast16_t aExp, uint32_t a)
 | `a'. If `a' is zero, 32 is returned.
 *----------------------------------------------------------------------------*/
 
-static int8 countLeadingZeros32( uint32_t a )
+static int8_t countLeadingZeros32( uint32_t a )
 {
 #if SOFTFLOAT_GNUC_PREREQ(3, 4)
     if (a) {
@@ -678,7 +678,7 @@ static int8 countLeadingZeros32( uint32_t a )
         return 32;
     }
 #else
-    static const int8 countLeadingZerosHigh[] = {
+    static const int8_t countLeadingZerosHigh[] = {
         8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
         3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
         2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
@@ -696,7 +696,7 @@ static int8 countLeadingZeros32( uint32_t a )
         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
     };
-    int8 shiftCount;
+    int8_t shiftCount;
 
     shiftCount = 0;
     if ( a < 0x10000 ) {
@@ -717,7 +717,7 @@ static int8 countLeadingZeros32( uint32_t a )
 | `a'. If `a' is zero, 64 is returned.
 *----------------------------------------------------------------------------*/
 
-static int8 countLeadingZeros64( uint64_t a )
+static int8_t countLeadingZeros64( uint64_t a )
 {
 #if SOFTFLOAT_GNUC_PREREQ(3, 4)
     if (a) {
@@ -726,7 +726,7 @@ static int8 countLeadingZeros64( uint64_t a )
         return 64;
     }
 #else
-    int8 shiftCount;
+    int8_t shiftCount;
 
     shiftCount = 0;
     if ( a < ( (uint64_t) 1 )<<32 ) {
diff --git a/fpu/softfloat-specialize.h b/fpu/softfloat-specialize.h
index 6dd41d8978..0875436b83 100644
--- a/fpu/softfloat-specialize.h
+++ b/fpu/softfloat-specialize.h
@@ -174,7 +174,7 @@ const float128 float128_default_nan
 | should be simply `float_exception_flags |= flags;'.
 *----------------------------------------------------------------------------*/
 
-void float_raise(int8 flags, float_status *status)
+void float_raise(int8_t flags, float_status *status)
 {
     status->float_exception_flags |= flags;
 }
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 4ee98c4e81..850d08fc17 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -146,9 +146,9 @@ static inline flag extractFloat16Sign(float16 a)
 
 static int32_t roundAndPackInt32(flag zSign, uint64_t absZ, float_status *status)
 {
-    int8 roundingMode;
+    int8_t roundingMode;
     flag roundNearestEven;
-    int8 roundIncrement, roundBits;
+    int8_t roundIncrement, roundBits;
     int32_t z;
 
     roundingMode = status->float_rounding_mode;
@@ -201,7 +201,7 @@ static int32_t roundAndPackInt32(flag zSign, uint64_t absZ, float_status *status
 static int64_t roundAndPackInt64(flag zSign, uint64_t absZ0, uint64_t absZ1,
                                  float_status *status)
 {
-    int8 roundingMode;
+    int8_t roundingMode;
     flag roundNearestEven, increment;
     int64_t z;
 
@@ -258,7 +258,7 @@ static int64_t roundAndPackInt64(flag zSign, uint64_t absZ0, uint64_t absZ1,
 static int64_t roundAndPackUint64(flag zSign, uint64_t absZ0,
                                   uint64_t absZ1, float_status *status)
 {
-    int8 roundingMode;
+    int8_t roundingMode;
     flag roundNearestEven, increment;
 
     roundingMode = status->float_rounding_mode;
@@ -358,7 +358,7 @@ float32 float32_squash_input_denormal(float32 a, float_status *status)
 static void normalizeFloat32Subnormal(uint32_t aSig, int_fast16_t *zExpPtr,
                                       uint32_t *zSigPtr)
 {
-    int8 shiftCount;
+    int8_t shiftCount;
 
     shiftCount = countLeadingZeros32( aSig ) - 8;
     *zSigPtr = aSig<<shiftCount;
@@ -410,9 +410,9 @@ static inline float32 packFloat32(flag zSign, int_fast16_t zExp, uint32_t zSig)
 static float32 roundAndPackFloat32(flag zSign, int_fast16_t zExp, uint32_t zSig,
                                    float_status *status)
 {
-    int8 roundingMode;
+    int8_t roundingMode;
     flag roundNearestEven;
-    int8 roundIncrement, roundBits;
+    int8_t roundIncrement, roundBits;
     flag isTiny;
 
     roundingMode = status->float_rounding_mode;
@@ -485,7 +485,7 @@ static float32
  normalizeRoundAndPackFloat32(flag zSign, int_fast16_t zExp, uint32_t zSig,
                               float_status *status)
 {
-    int8 shiftCount;
+    int8_t shiftCount;
 
     shiftCount = countLeadingZeros32( zSig ) - 1;
     return roundAndPackFloat32(zSign, zExp - shiftCount, zSig<<shiftCount,
@@ -551,7 +551,7 @@ float64 float64_squash_input_denormal(float64 a, float_status *status)
 static void normalizeFloat64Subnormal(uint64_t aSig, int_fast16_t *zExpPtr,
                                       uint64_t *zSigPtr)
 {
-    int8 shiftCount;
+    int8_t shiftCount;
 
     shiftCount = countLeadingZeros64( aSig ) - 11;
     *zSigPtr = aSig<<shiftCount;
@@ -603,7 +603,7 @@ static inline float64 packFloat64(flag zSign, int_fast16_t zExp, uint64_t zSig)
 static float64 roundAndPackFloat64(flag zSign, int_fast16_t zExp, uint64_t zSig,
                                    float_status *status)
 {
-    int8 roundingMode;
+    int8_t roundingMode;
     flag roundNearestEven;
     int_fast16_t roundIncrement, roundBits;
     flag isTiny;
@@ -677,7 +677,7 @@ static float64
  normalizeRoundAndPackFloat64(flag zSign, int_fast16_t zExp, uint64_t zSig,
                               float_status *status)
 {
-    int8 shiftCount;
+    int8_t shiftCount;
 
     shiftCount = countLeadingZeros64( zSig ) - 1;
     return roundAndPackFloat64(zSign, zExp - shiftCount, zSig<<shiftCount,
@@ -731,7 +731,7 @@ static inline flag extractFloatx80Sign( floatx80 a )
 static void
  normalizeFloatx80Subnormal( uint64_t aSig, int32_t *zExpPtr, uint64_t *zSigPtr )
 {
-    int8 shiftCount;
+    int8_t shiftCount;
 
     shiftCount = countLeadingZeros64( aSig );
     *zSigPtr = aSig<<shiftCount;
@@ -778,11 +778,11 @@ static inline floatx80 packFloatx80( flag zSign, int32_t zExp, uint64_t zSig )
 | Floating-Point Arithmetic.
 *----------------------------------------------------------------------------*/
 
-static floatx80 roundAndPackFloatx80(int8 roundingPrecision, flag zSign,
+static floatx80 roundAndPackFloatx80(int8_t roundingPrecision, flag zSign,
                                      int32_t zExp, uint64_t zSig0, uint64_t zSig1,
                                      float_status *status)
 {
-    int8 roundingMode;
+    int8_t roundingMode;
     flag roundNearestEven, increment, isTiny;
     int64_t roundIncrement, roundMask, roundBits;
 
@@ -974,12 +974,12 @@ static floatx80 roundAndPackFloatx80(int8 roundingPrecision, flag zSign,
 | normalized.
 *----------------------------------------------------------------------------*/
 
-static floatx80 normalizeRoundAndPackFloatx80(int8 roundingPrecision,
+static floatx80 normalizeRoundAndPackFloatx80(int8_t roundingPrecision,
                                               flag zSign, int32_t zExp,
                                               uint64_t zSig0, uint64_t zSig1,
                                               float_status *status)
 {
-    int8 shiftCount;
+    int8_t shiftCount;
 
     if ( zSig0 == 0 ) {
         zSig0 = zSig1;
@@ -1060,7 +1060,7 @@ static void
     uint64_t *zSig1Ptr
  )
 {
-    int8 shiftCount;
+    int8_t shiftCount;
 
     if ( aSig0 == 0 ) {
         shiftCount = countLeadingZeros64( aSig1 ) - 15;
@@ -1131,7 +1131,7 @@ static float128 roundAndPackFloat128(flag zSign, int32_t zExp,
                                      uint64_t zSig0, uint64_t zSig1,
                                      uint64_t zSig2, float_status *status)
 {
-    int8 roundingMode;
+    int8_t roundingMode;
     flag roundNearestEven, increment, isTiny;
 
     roundingMode = status->float_rounding_mode;
@@ -1249,7 +1249,7 @@ static float128 normalizeRoundAndPackFloat128(flag zSign, int32_t zExp,
                                               uint64_t zSig0, uint64_t zSig1,
                                               float_status *status)
 {
-    int8 shiftCount;
+    int8_t shiftCount;
     uint64_t zSig2;
 
     if ( zSig0 == 0 ) {
@@ -1297,7 +1297,7 @@ float64 int32_to_float64(int32_t a, float_status *status)
 {
     flag zSign;
     uint32_t absA;
-    int8 shiftCount;
+    int8_t shiftCount;
     uint64_t zSig;
 
     if ( a == 0 ) return float64_zero;
@@ -1320,7 +1320,7 @@ floatx80 int32_to_floatx80(int32_t a, float_status *status)
 {
     flag zSign;
     uint32_t absA;
-    int8 shiftCount;
+    int8_t shiftCount;
     uint64_t zSig;
 
     if ( a == 0 ) return packFloatx80( 0, 0, 0 );
@@ -1342,7 +1342,7 @@ float128 int32_to_float128(int32_t a, float_status *status)
 {
     flag zSign;
     uint32_t absA;
-    int8 shiftCount;
+    int8_t shiftCount;
     uint64_t zSig0;
 
     if ( a == 0 ) return packFloat128( 0, 0, 0, 0 );
@@ -1364,7 +1364,7 @@ float32 int64_to_float32(int64_t a, float_status *status)
 {
     flag zSign;
     uint64_t absA;
-    int8 shiftCount;
+    int8_t shiftCount;
 
     if ( a == 0 ) return float32_zero;
     zSign = ( a < 0 );
@@ -1415,7 +1415,7 @@ floatx80 int64_to_floatx80(int64_t a, float_status *status)
 {
     flag zSign;
     uint64_t absA;
-    int8 shiftCount;
+    int8_t shiftCount;
 
     if ( a == 0 ) return packFloatx80( 0, 0, 0 );
     zSign = ( a < 0 );
@@ -1435,7 +1435,7 @@ float128 int64_to_float128(int64_t a, float_status *status)
 {
     flag zSign;
     uint64_t absA;
-    int8 shiftCount;
+    int8_t shiftCount;
     int32_t zExp;
     uint64_t zSig0, zSig1;
 
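The whole patch applies one mechanical substitution: the softfloat code's locally defined int8 type is replaced with the standard int8_t from <stdint.h>. The sketch below illustrates that pattern on a stand-alone count-leading-zeros helper; the typedef and function names are illustrative stand-ins and are not taken from fpu/softfloat.c.

/* Illustrative sketch of the int8 -> int8_t conversion pattern.
 * The local typedef and the helpers below are hypothetical examples,
 * not QEMU code. */
#include <stdint.h>
#include <stdio.h>

/* Old style: the library carries its own 8-bit integer typedef. */
typedef int8_t int8;                        /* illustrative local typedef */

static int8 count_leading_zeros32_old(uint32_t a)
{
    int8 shiftCount = 0;
    if (a == 0) {
        return 32;
    }
    while ((a & 0x80000000u) == 0) {        /* shift until the top bit is set */
        a <<= 1;
        shiftCount++;
    }
    return shiftCount;
}

/* New style: the standard <stdint.h> type is used directly, so the
 * local typedef can eventually be removed. */
static int8_t count_leading_zeros32_new(uint32_t a)
{
    int8_t shiftCount = 0;
    if (a == 0) {
        return 32;
    }
    while ((a & 0x80000000u) == 0) {
        a <<= 1;
        shiftCount++;
    }
    return shiftCount;
}

int main(void)
{
    /* Both variants behave identically; only the spelling of the type changes. */
    printf("%d %d\n", count_leading_zeros32_old(1u),
           count_leading_zeros32_new(0x00010000u));   /* prints "31 15" */
    return 0;
}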