I am trying to use a Morton code to produce a unique encoding for a given (x,y,z), where x, y, z are double-precision floating-point numbers. I presume I can use a type cast to convert the doubles to integers and then run Morton ordering on those integers. For example, consider the following C++ code. (I don't know how to do the same in C.)
double x=-1.123456789123456E205;
int64_t i = reinterpret_cast<int64_t &>(x);
cout<<i<<endl;
output >>> i = -1548698869907112442
And ditto for the remaining coordinates. Once I have the "reinterpreted" values, I would like to pass them to a Morton encoding/decoding routine.
I checked the above type cast, and it works fine in reverse:
double y = reinterpret_cast<double &>(i);
cout<<setprecision(16)<<y<<endl;
output>>-1.123456789123456e+205
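As an aside, I believe the same round trip can also be written with memcpy, which works in C as well and avoids the strict-aliasing concerns of casting through a reference (std::bit_cast would be the C++20 equivalent). A minimal sketch of what I mean:

#include <cstdint>
#include <cstring>
#include <iostream>
#include <iomanip>
using namespace std;

int main(void) {
    double x = -1.123456789123456E205;
    // Copy the 8 bytes of the double into a same-sized integer.
    // memcpy is well defined for this, unlike casting through a reference.
    uint64_t bits;
    memcpy(&bits, &x, sizeof bits);
    cout << bits << endl;
    // Round trip: copy the bytes back into a double.
    double y;
    memcpy(&y, &bits, sizeof y);
    cout << setprecision(16) << y << endl;   // prints -1.123456789123456e+205
    return 0;
}

As far as I can tell, both approaches give me the same bit pattern.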
I managed to find some code for Morton coding, some of it on this forum, but none of it used int64_t in 3D. Hence I need the help of the experts on this forum: how do I encode and decode int64_t integers in 3D?
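The closest thing I did find is the usual 3D variant that keeps only 21 bits per coordinate. My reconstruction of it is below, using the standard "magic bits" constants (the function names are my own):

#include <stdint.h>

// Spreads the lowest 21 bits of v so that bit k lands at bit 3k
// (standard "magic bits" constants for 64-bit 3D Morton codes).
uint64_t split_by_3(uint64_t v) {
    uint64_t x = v & 0x1FFFFF;                      // keep only 21 bits
    x = (x | (x << 32)) & 0x001F00000000FFFF;
    x = (x | (x << 16)) & 0x001F0000FF0000FF;
    x = (x | (x << 8))  & 0x100F00F00F00F00F;
    x = (x | (x << 4))  & 0x10C30C30C30C30C3;
    x = (x | (x << 2))  & 0x1249249249249249;
    return x;
}

// Inverse of split_by_3: collects every third bit back into the low 21 bits.
uint64_t compact_by_3(uint64_t v) {
    uint64_t x = v & 0x1249249249249249;
    x = (x | (x >> 2))  & 0x10C30C30C30C30C3;
    x = (x | (x >> 4))  & 0x100F00F00F00F00F;
    x = (x | (x >> 8))  & 0x001F0000FF0000FF;
    x = (x | (x >> 16)) & 0x001F00000000FFFF;
    x = (x | (x >> 32)) & 0x00000000001FFFFF;
    return x;
}

// Encode: interleave 21-bit x, y, z. Decode x with compact_by_3(m),
// y with compact_by_3(m >> 1), z with compact_by_3(m >> 2).
uint64_t morton_3D_21bit(uint64_t x, uint64_t y, uint64_t z) {
    return split_by_3(x) | (split_by_3(y) << 1) | (split_by_3(z) << 2);
}

Three full 64-bit patterns would need 192 bits of Morton code, so I do not see how this extends to whole int64_t values in a single uint64_t; that is part of what I need help with.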
In the meantime I managed to reverse-engineer the following code. Unfortunately there is a bug somewhere: I am not getting the proper numbers back when I run the decode part. I would appreciate any help in figuring out what is wrong.
2D/3D Morton code encode/decode, 64 bits:
#include <iostream>
#include <stdint.h>
#include <iomanip>
using namespace std;

// Interleave the bits of two doubles into one 64-bit 2D Morton code.
uint64_t code_2D_M(double xd, double yd) {
    uint64_t x = reinterpret_cast<uint64_t&>(xd);
    uint64_t y = reinterpret_cast<uint64_t&>(yd);
    x = (x | (x << 16)) & 0x0000FFFF0000FFFF;
    x = (x | (x << 8))  & 0x00FF00FF00FF00FF;
    x = (x | (x << 4))  & 0x0F0F0F0F0F0F0F0F;
    x = (x | (x << 2))  & 0x3333333333333333;
    x = (x | (x << 1))  & 0x5555555555555555;
    y = (y | (y << 16)) & 0x0000FFFF0000FFFF;
    y = (y | (y << 8))  & 0x00FF00FF00FF00FF;
    y = (y | (y << 4))  & 0x0F0F0F0F0F0F0F0F;
    y = (y | (y << 2))  & 0x3333333333333333;
    y = (y | (y << 1))  & 0x5555555555555555;
    return x | (y << 1);
}

// 3D version.
uint64_t code_3D_M(double xd, double yd, double zd) {
    uint64_t x = reinterpret_cast<uint64_t&>(xd);
    uint64_t y = reinterpret_cast<uint64_t&>(yd);
    uint64_t z = reinterpret_cast<uint64_t&>(zd);
    x = (x | (x << 16)) & 0x0000FFFF0000FFFF;
    x = (x | (x << 8))  & 0x00FF00FF00FF00FF;
    x = (x | (x << 4))  & 0x0F0F0F0F0F0F0F0F;
    x = (x | (x << 2))  & 0x3333333333333333;
    x = (x | (x << 1))  & 0x5555555555555555;
    y = (y | (y << 16)) & 0x0000FFFF0000FFFF;
    y = (y | (y << 8))  & 0x00FF00FF00FF00FF;
    y = (y | (y << 4))  & 0x0F0F0F0F0F0F0F0F;
    y = (y | (y << 2))  & 0x3333333333333333;
    y = (y | (y << 1))  & 0x5555555555555555;
    z = (y | (y << 16)) & 0x0000FFFF0000FFFF;
    z = (y | (y << 8))  & 0x00FF00FF00FF00FF;
    z = (y | (y << 4))  & 0x0F0F0F0F0F0F0F0F;
    z = (y | (y << 2))  & 0x3333333333333333;
    z = (y | (y << 1))  & 0x5555555555555555;
    return x | (y << 1) | (z << 2);
}

// Extract every other bit and reassemble the result into a double.
double decode_M(uint64_t x) {
    x = x & 0x5555555555555555;
    x = (x | (x >> 1))  & 0x3333333333333333;
    x = (x | (x >> 2))  & 0x0F0F0F0F0F0F0F0F;
    x = (x | (x >> 4))  & 0x00FF00FF00FF00FF;
    x = (x | (x >> 8))  & 0x0000FFFF0000FFFF;
    x = (x | (x >> 16)) & 0xFFFFFFFFFFFFFFFF;
    return reinterpret_cast<double&>(x);
}

int main(void) {
    uint64_t mort;
    double x, y, z;
    // test input
    x = 2.123456789123459E205;
    y = 1.789789123456129E205;
    z = 9.999999912345779E205;
    // echo the input
    cout << setprecision(17) << x << endl;
    cout << setprecision(17) << y << endl;
    cout << setprecision(17) << z << endl;
    // encode 2D case
    mort = code_2D_M(x, y);
    // decode and print the results to see if all was fine
    cout << setprecision(17) << decode_M(mort >> 0) << endl;
    cout << setprecision(17) << decode_M(mort >> 1) << endl;
    // encode 3D case
    mort = code_3D_M(x, y, z);
    // decode and print the results to see if all was fine
    cout << setprecision(17) << decode_M(mort >> 0) << endl;
    cout << setprecision(17) << decode_M(mort >> 1) << endl;
    cout << setprecision(17) << decode_M(mort >> 2) << endl;
    return 0;
}
I am doing this because I would rather not store the coordinates as a 3D point (x,y,z) but as a single long integer, decoding them only when needed. By doing so I would reduce the size of my coordinate storage array three-fold.