I'm trying to use this function for an integer-only RGB to HSV conversion, but there is a problem without using float... (I've included the struct definitions and a RgbToHsv signature below for completeness; the 8-bit fields are my assumption.)
typedef struct { unsigned char r, g, b; } RgbColor;   /* assuming 8-bit channels */
typedef struct { unsigned char h, s, v; } HsvColor;

HsvColor RgbToHsv(RgbColor rgb)
{
    HsvColor hsv;
    unsigned char rgbMin, rgbMax;

    /* find the smallest and largest channel */
    rgbMin = rgb.r < rgb.g ? (rgb.r < rgb.b ? rgb.r : rgb.b) : (rgb.g < rgb.b ? rgb.g : rgb.b);
    rgbMax = rgb.r > rgb.g ? (rgb.r > rgb.b ? rgb.r : rgb.b) : (rgb.g > rgb.b ? rgb.g : rgb.b);

    hsv.v = rgbMax;
    if (hsv.v == 0)
    {
        /* black: hue and saturation are undefined */
        hsv.h = 0;
        hsv.s = 0;
        return hsv;
    }

    hsv.s = 255 * (long int)(rgbMax - rgbMin) / hsv.v;
    if (hsv.s == 0)
    {
        /* grey: hue is undefined */
        hsv.h = 0;
        return hsv;
    }

    /* hue on a 0..255 scale; each of the six sectors is ~43 units wide */
    if (rgbMax == rgb.r)
        hsv.h = 0 + 43 * (rgb.g - rgb.b) / (rgbMax - rgbMin);
    else if (rgbMax == rgb.g)
        hsv.h = 85 + 43 * (rgb.b - rgb.r) / (rgbMax - rgbMin);
    else
        hsv.h = 171 + 43 * (rgb.r - rgb.g) / (rgbMax - rgbMin);

    return hsv;
}
My problem is that 43 * (rgb.b - rgb.r) / (rgbMax - rgbMin) can evaluate to a value below 1; without using float, the integer division truncates that to 0, so hsv.h always stays at 85 on this line:
hsv.h = 85 + 43 * (rgb.b - rgb.r) / (rgbMax - rgbMin);
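For example (a minimal test with made-up channel values, using the structs and function above), when green dominates and blue is only one step above red, the hue offset vanishes completely:

#include <stdio.h>

int main(void)
{
    /* hypothetical test values: green is the max, blue barely exceeds red */
    RgbColor rgb = { 10, 200, 11 };
    HsvColor hsv = RgbToHsv(rgb);

    /* 43 * (rgb.b - rgb.r) = 43 and (rgbMax - rgbMin) = 190,
       so 43 / 190 truncates to 0 and the hue sticks at 85 */
    printf("h=%d s=%d v=%d\n", hsv.h, hsv.s, hsv.v);   /* prints h=85 s=242 v=200 */
    return 0;
}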
Does anyone have an idea how to solve this?