#include <bitset>
#include <cmath>
#include <iostream>
#include <string>
#include <vector>

typedef unsigned char BYTE;

float bytesToFloatIEEE(const std::vector<BYTE>& b)
{
    // Convert the vector of characters to a string;
    // std::bitset expects 32 characters, each '0' or '1'
    std::string bString(b.begin(), b.end());

    // DEBUG!!!
    std::cout << "BITSET STRING: " << bString << std::endl;

    std::bitset<32> set(bString);
    unsigned long bits = set.to_ulong();

    // Bit 31 is the sign, bits 30..23 the biased exponent
    bool negative = !!(bits & 0x80000000UL);
    int exponent = (bits & 0x7f800000UL) >> 23;
    int sign = negative ? -1 : 1;

    // Subtract the IEEE 754 bias of 127 from the exponent
    exponent -= 127;

    // Convert the mantissa into a decimal fraction using the
    // last 23 bits; b[9] is the first mantissa character after
    // the sign (b[0]) and the eight exponent characters (b[1..8])
    int power = -1;
    float total = 0.0f;
    for (int i = 0; i < 23; i++)
    {
        int c = b[i + 9] - '0';
        total += (float) c * (float) std::pow(2.0, power);
        power--;
    }

    // Add the implicit leading 1 (assumes a normalized value)
    total += 1.0f;

    float finalResult = sign * (float) std::pow(2.0, exponent) * total;
    return finalResult;
}
However, this code throws the following exception at the std::bitset constructor: std::invalid_argument: bitset string ctor has invalid argument
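My guess is that the std::bitset string constructor only accepts the characters '0' and '1', so it would throw if the vector holds raw byte values rather than ASCII digits. A minimal sketch of the two cases (the byte values here are just my own example for 1.0f):

    // Raw bytes of 1.0f (0x3F800000, big-endian) - these are NOT the
    // characters '0'/'1', so the bitset constructor throws
    std::vector<BYTE> raw = { 0x3F, 0x80, 0x00, 0x00 };
    std::string s(raw.begin(), raw.end());
    std::bitset<32> bad(s);        // std::invalid_argument

    // 32 ASCII '0'/'1' characters - accepted
    std::string digits = "00111111100000000000000000000000";
    std::bitset<32> ok(digits);    // ok.to_ulong() == 0x3F800000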
1) Is there a simpler way to convert a vector of unsigned characters to an IEEE 754 float? (I sketch one idea below.)
2) Why exactly does the bitset constructor consider its argument invalid - is my guess above correct?
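For question 1, I have seen memcpy-based type punning suggested elsewhere. A minimal sketch of what I have in mind, assuming the vector holds the four raw bytes of the float in the machine's native byte order (the name bytesToFloatMemcpy is mine):

    #include <cstring>   // std::memcpy

    float bytesToFloatMemcpy(const std::vector<BYTE>& b)
    {
        float f = 0.0f;
        // Copy the four raw bytes directly into the float's storage;
        // assumes b.size() >= 4 and native (usually little-endian) order
        std::memcpy(&f, b.data(), sizeof(f));
        return f;
    }

Would something like this be considered idiomatic, or does the standard library offer a cleaner way?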