Given a 32-bit signed integer, how can it be converted to the 32 bits of its IEEE 754 single-precision representation, using only integer and bit operations on the representation (rather than a machine instruction or compiler operation that performs the conversion)?
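One way to obtain the expected bit pattern for testing is to let the compiler do the conversion and then copy the float's bytes into an unsigned int. The helper below (reference_bits, a name chosen here purely for illustration) is such a harness, not part of the conversion itself:

#include <string.h>

/* Assumed test helper: returns the bits of (float)x as produced by the
   compiler's own int-to-float conversion, for comparison against a
   hand-rolled result. */
unsigned int reference_bits(int x)
{
    float f = (float)x;
    unsigned int bits;
    memcpy(&bits, &f, sizeof bits);  /* byte-wise copy avoids aliasing UB */
    return bits;
}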
I have the following function, but it fails in some cases. In particular, the result differs from the compiler's (float)x whenever the magnitude of x needs more than 24 significant bits, because the low-order bits are truncated instead of rounded to nearest even:
#include <stdio.h>

/* Input:  int x, an ordinary 32-bit signed integer.
   Output: the 32 bits of (float)x in IEEE 754 single-precision format,
           returned in an unsigned int. */
unsigned int int_to_float_bits(int x)
{
    if (x == 0) return 0;            /* zero encodes as all-zero bits */

    unsigned int signBit = 0;
    unsigned int absX = (unsigned int)x;
    if (x < 0)
    {
        signBit = 0x80000000u;
        absX = 0u - (unsigned int)x; /* unsigned negation; -x would overflow for INT_MIN */
    }

    /* Normalize: shift the leading 1 up to bit 31. When it is already
       there, the unbiased exponent is 31, i.e. 31 + 127 = 158 biased. */
    unsigned int exponent = 158;
    while ((absX & 0x80000000u) == 0)
    {
        exponent--;
        absX <<= 1;
    }

    /* Keep the top 24 bits (the implicit leading 1 is masked off below).
       The low 8 bits of absX are simply discarded here, with no rounding. */
    unsigned int mantissa = absX >> 8;

    unsigned int result = signBit | (exponent << 23) | (mantissa & 0x7fffffu);
    printf("\nfor x: %x, result: %x", (unsigned int)x, result);
    return result;
}
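The failures come from the plain truncation at mantissa = absX >> 8. Below is a minimal sketch of a corrected version that adds round-to-nearest-even on the 8 discarded bits, which is what the default conversion does; the function name int_to_float_bits_rounded is chosen here for illustration, and the structure otherwise mirrors the function above:

/* Sketch: same conversion, but the 8 bits shifted out of the mantissa
   are rounded to nearest, ties to even, instead of being dropped. */
unsigned int int_to_float_bits_rounded(int x)
{
    if (x == 0) return 0;

    unsigned int signBit = 0;
    unsigned int absX = (unsigned int)x;
    if (x < 0)
    {
        signBit = 0x80000000u;
        absX = 0u - (unsigned int)x;      /* safe for INT_MIN */
    }

    /* Normalize so the leading 1 sits in bit 31. */
    unsigned int exponent = 158;
    while ((absX & 0x80000000u) == 0)
    {
        exponent--;
        absX <<= 1;
    }

    unsigned int mantissa = absX >> 8;    /* top 24 bits, leading 1 included */
    unsigned int rest = absX & 0xffu;     /* the 8 bits being discarded */

    /* Round to nearest; on an exact tie (rest == 0x80) round to even. */
    if (rest > 0x80u || (rest == 0x80u && (mantissa & 1u)))
    {
        mantissa++;
        if (mantissa == 0x1000000u)       /* carry out of the 24th bit */
        {
            mantissa >>= 1;               /* renormalize and bump the exponent */
            exponent++;
        }
    }

    return signBit | (exponent << 23) | (mantissa & 0x7fffffu);
}

For example, x = 0x7fffffff then yields 0x4f000000 (2147483648.0f), matching (float)x, whereas the truncating version returns 0x4effffff (2147483520.0f).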