I'm writing software for counting preferential multi-seat elections. One common requirement is fixed precision. This means that all math operations must be done on values with a fixed specified precision and the result must have the same precision. Fixed precision means a set number of digits after the decimal. Any digits after that are discarded.
So if we assume 5 digits of precision:
42/139
becomes:
42.00000/139.00000 = 0.30215
I'm having problems writing unit tests for this. So far I've written these two tests for big and small numbers.
public void TestPrecisionBig()
{
PRECISION = 5;
decimal d = Precision(1987.7845263487169386183643876m);
Assert.That(d == 1987.78452m);
}
public void TestPrecisionSmall()
{
PRECISION = 5;
decimal d = Precision(42);
Assert.That(d == 42.00000m);
}
But it evaluates to 42 == 42.00000m Not what I want.
How do I test this? I guess I could do a d.ToString, but would that be a good "proper" test?
Edit: I was asked to show my implementation of the Precision method. It's not very elegant, but it works.
public static decimal Precision(decimal d)
{
if (d == 0) return 0.00000m;
decimal output = Math.Round(d, 6);
string s = output.ToString(CurrentCulture);
char c = char.Parse(CurrentCulture.NumberFormat.NumberDecimalSeparator);
if (s.Contains(c))
{
output = decimal.Parse(s.Substring(0, s.Length - 1));
return output;
}
s += c;
for (int i = 0; i <= Constants.PRECISION; i++) s += '0';
output = decimal.Parse(s.Substring(0, s.IndexOf(c) + Constants.PRECISION + 1));
return output;
}
Now I'll probably see if I can't just set the exponent directly.
Edit 2: New bit-jugling precision method
public static decimal Precision(decimal d)
{
if (d == 0) return 0.00000m;
string exponent = System.Convert.ToString(Constants.PRECISION, 2);
exponent = exponent.PadLeft(8, '0');
int positive = Convert.ToInt32("00000000" + exponent + "0000000000000000", 2);
int negative = Convert.ToInt32("10000000" + exponent + "0000000000000000", 2);
int preScaler = (int)Math.Pow(10, Constants.PRECISION);
d *= preScaler;
d = decimal.Truncate(d);
int[] bits = decimal.GetBits(d);
bits[3] = (bits[3] & 0x80000000) == 0 ? positive : negative;
return new decimal(bits);
}