Code:
/* Align the exponents of two floating point registers so their mantissas
 * can be combined with plain integer arithmetic.
 *
 * num, val and tmp are node indices into the ALU's register pool; NUM and
 * VAL are treated as full-width floats, TMP as unsigned scratch for the
 * mantissa shift.  The operand with the smaller exponent has its exponent
 * raised to match the larger one and its mantissa shifted right by the
 * difference; the implicit (assumed) leading 1 bit is then re-inserted at
 * its shifted position.
 *
 * Returns 0 on success — with alu->block.fault set to ERANGE when the
 * shift truncated mantissa bits — an error code on failure, or the result
 * of alu_err_null_ptr() when alu is NULL.
 *
 * NOTE(review): the assumed bit is re-inserted unconditionally; a
 * subnormal operand (stored exponent of 0) has no implicit 1, so confirm
 * callers never pass subnormals here.
 */
int_t alu_match_exponents
(
	alu_t *alu
	, uint_t num
	, uint_t val
	, uint_t tmp
)
{
	if ( alu )
	{
		int_t ret;
		alu_reg_t NUM, VAL, NEXP, VEXP, NMAN, VMAN, TMP;
		size_t nexp;

		alu_reg_init_floating( alu, NUM, num );
		alu_reg_init_floating( alu, VAL, val );
		alu_reg_init_unsigned( alu, TMP, tmp );

		NUM.upto = alu_Nbits(alu);
		VAL.upto = alu_Nbits(alu);

		alu_reg_init_mantissa( NUM, NMAN );
		alu_reg_init_exponent( NUM, NEXP );
		alu_reg_init_mantissa( VAL, VMAN );
		alu_reg_init_exponent( VAL, VEXP );

		ret = alu_reg_get_raw( alu, NEXP, &nexp, sizeof(size_t) );

		if ( ret == 0 )
		{
			size_t vexp, diff;
			bool truncated = false;

			/* FIX: was sizeof(ssize_t) — vexp is a size_t and the two
			 * types are not guaranteed to share a size */
			(void)alu_reg_get_raw( alu, VEXP, &vexp, sizeof(size_t) );

			alu->block.fault = 0;

			if ( nexp > vexp )
			{
				diff = nexp - vexp;

				/* Raise VAL's exponent to NUM's and compensate by
				 * shifting VAL's mantissa right */
				alu_reg_int2int( alu, VEXP, NEXP );
				alu_reg__shr( alu, VMAN, TMP, diff );
				truncated = (alu_errno(alu) == ERANGE);

				/* Re-insert the assumed leading 1 bit at its shifted
				 * position (it sat just above the mantissa before the
				 * shift) */
				if ( diff < VMAN.upto )
				{
					void *V = alu_reg_data( alu, VAL );
					alu_bit_t v = alu_bit( V, VMAN.upto - diff );
					*(v.ptr) |= v.mask;
					/* FIX: removed dead store "diff = VMAN.upto - v.bit"
					 * — diff was never read again and the mirror branch
					 * below had no such line */
				}
			}
			else if ( vexp > nexp )
			{
				diff = vexp - nexp;

				/* Mirror case: raise NUM's exponent and shift NUM's
				 * mantissa right */
				alu_reg_int2int( alu, NEXP, VEXP );
				alu_reg__shr( alu, NMAN, TMP, diff );
				truncated = (alu_errno(alu) == ERANGE);

				/* Re-insert the assumed leading 1 bit */
				if ( diff < NMAN.upto )
				{
					void *N = alu_reg_data( alu, NUM );
					alu_bit_t n = alu_bit( N, NMAN.upto - diff );
					*(n.ptr) |= n.mask;
				}
			}

			alu->block.fault = IFTRUE(truncated,ERANGE);
			return 0;
		}

		alu_error(ret);
		return ret;
	}

	return alu_err_null_ptr("alu");
}
/* Floating point addition: NUM += VAL, with VAL left unchanged.
 *
 * cpy and tmp are spare register nodes used as full-width working copies
 * of NUM and VAL so the operands themselves are not damaged while the
 * exponents are matched and the mantissas aligned and summed.
 *
 * Returns 0 on success, ERANGE when mantissa bits were truncated during
 * exponent matching, an error code on failure, or the result of
 * alu_err_null_ptr() when alu is NULL.
 */
int_t alu_reg_addition(
	alu_t *alu
	, alu_reg_t NUM
	, alu_reg_t VAL
	, uint_t cpy
	, uint_t tmp
)
{
	if ( alu )
	{
		/* FIX: was "int ret" — every other path in this file uses int_t;
		 * also removed unused locals carry, changed, v, pos, part and the
		 * unused TEXP register view */
		int_t ret;
		alu_bit_t n;
		alu_reg_t CPY, CEXP, CMAN, TMAN, TMP;

		NUM.node %= alu_used( alu );
		VAL.node %= alu_used( alu );

		ret = IFTRUE( !NUM.node || !VAL.node, EINVAL );

		if ( ret )
		{
			alu_error( ret );
			if ( !NUM.node ) alu_puts( "NUM.node was 0!" );
			if ( !VAL.node ) alu_puts( "VAL.node was 0!" );
			return ret;
		}

		if ( alu_reg_floating( VAL ) )
		{
			alu_reg_init_floating( alu, TMP, tmp );
			TMP.upto = alu_Nbits(alu);

			/* VAL is supposed to be unchanged so work on a copy */
			ret = alu_reg_mov( alu, TMP, VAL );

			if ( ret == 0 )
			{
				uint_t temp;

				alu_reg_init_floating( alu, CPY, cpy );
				CPY.upto = alu_Nbits(alu);

				/* Need NUM unchanged so details can be restored later;
				 * having both floats the same size also makes the math
				 * easier.  No chance of failure here as the previous
				 * move already acquired the temporaries this one needs. */
				(void)alu_reg_mov( alu, CPY, NUM );

				temp = alu_get_reg_node( alu, 0 );

				if ( temp )
				{
					ret = alu_match_exponents( alu, cpy, tmp, temp );
					alu_rem_reg_node( alu, &temp );

					if ( ret == 0 )
					{
						/* match_exponents reports truncation via the
						 * fault slot rather than its return value */
						bool truncated = (alu_errno(alu) == ERANGE);
						uint_t nodes[2];

						ret = alu_get_reg_nodes( alu, nodes, 2, 0 );

						if ( ret == 0 )
						{
							alu_reg_t _CMAN, _TMAN;
							size_t bias, cexp;

							/* Full-width unsigned scratch so the mantissa
							 * sum has room to carry */
							alu_reg_init_unsigned( alu, _CMAN, nodes[0] );
							alu_reg_init_unsigned( alu, _TMAN, nodes[1] );

							alu_reg_init_exponent( CPY, CEXP );
							alu_reg_init_mantissa( CPY, CMAN );
							alu_reg_init_mantissa( TMP, TMAN );

							alu_reg_int2int( alu, _CMAN, CMAN );
							alu_reg_int2int( alu, _TMAN, TMAN );

							ret = alu_reg_add( alu, _CMAN, _TMAN );

							switch ( ret )
							{
							case 0: case EOVERFLOW: case ENODATA: break;
							default:
								alu_rem_reg_nodes( alu, nodes, 2 );
								alu_error(ret);
								return ret;
							}

							n = alu_reg_end_bit( alu, _CMAN );

							(void)alu_reg_get_exponent( alu, CPY, &cexp );
							bias = alu_reg_get_exponent_bias( CPY );
							cexp -= bias;
							/* NOTE(review): cexp is computed but never
							 * used, and the stored exponent is set below
							 * straight from the sum's top bit position
							 * with no bias applied — this looks like the
							 * unfinished part of the normalisation; the
							 * mantissa is also not re-shifted when the
							 * sum carried past its width. */
							ret = alu_reg_set_raw( alu, CEXP, &(n.bit), sizeof(size_t), 0 );

							/* FIX: copy the mantissa sum back into CPY —
							 * previously the result left in the scratch
							 * node was discarded, so NUM received CPY's
							 * pre-addition mantissa. */
							(void)alu_reg_int2int( alu, CMAN, _CMAN );

							/* No chance of failure at this point */
							(void)alu_reg_mov( alu, NUM, CPY );

							alu_rem_reg_nodes( alu, nodes, 2 );
							return EITHER( truncated, ERANGE, ret );
						}
					}
				}
				else
				{
					ret = alu_errno(alu);
				}
			}

			alu_error(ret);
			return ret;
		}

		/* FIX: the posted function was brace-unbalanced here (no closes
		 * for the if(alu) block or the function, and no non-floating
		 * path).  Returning 0 would falsely report success, so report
		 * the gap explicitly.  TODO: implement the integer fallback. */
		return ENOSYS;
	}

	return alu_err_null_ptr("alu");
}
I'm going to grab something to eat — hopefully someone manages to spot where I went wrong. As a reminder, the videos I'm using to understand floating-point addition are two posts prior to this one.