Here's some code that would do the association.
import numpy as np

def get_colored_point_cloud(calib, rgb, depth):
    """
    Pass in an rgb image and its associated depth map.
    Return the point cloud and a color for each point.
    cloud.shape  -> (num_points, 3) for [x, y, z]
    colors.shape -> (num_points, 3) for [r, g, b]
    """
    rows, cols = depth.shape
    # create a pixel grid and stack it with depth and rgb
    c, r = np.meshgrid(np.arange(cols), np.arange(rows))  # c, r -> (rows, cols)
    points = np.stack([c, r, depth])  # -> (3, rows, cols)
    colors = np.stack([c, r, rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]])  # -> (5, rows, cols)
    points = points.reshape(3, -1)  # -> (3, num_points)
    colors = colors.reshape(5, -1)  # -> (5, num_points)
    points = points.T  # -> (num_points, 3)
    colors = colors.T  # -> (num_points, 5)
    # now transform [u, v, z] to [x, y, z] by camera unprojection
    cloud = unproject_image_to_point_cloud(points, calib.intrinsic_params)  # -> (num_points, 3)
    return cloud, colors[:, 2:5]  # (num_points, 3), (num_points, 3)
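Here unproject_image_to_point_cloud is whatever unprojection helper you already have. If you need one, a minimal pinhole-model sketch would look roughly like this (assuming calib.intrinsic_params exposes fx, fy, cx, cy; adjust to however your calibration actually stores them):

import numpy as np

def unproject_image_to_point_cloud(points, intrinsic_params):
    # points -> (num_points, 3) as [u, v, z]
    # intrinsic_params is assumed to carry pinhole intrinsics fx, fy, cx, cy
    fx, fy = intrinsic_params.fx, intrinsic_params.fy
    cx, cy = intrinsic_params.cx, intrinsic_params.cy
    u, v, z = points[:, 0], points[:, 1], points[:, 2]
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    return np.stack([x, y, z], axis=1)  # -> (num_points, 3)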
It is also possible to do this through Open3D, but you will have to deal with the practical matter of getting the intrinsics and view set up the way you want for it to work in Open3D.
See this post: Generate point cloud from depth image
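If you do go the Open3D route, the rough shape of it is below. This is a sketch, not a drop-in: the function name is mine, and the depth_scale and depth_trunc values are placeholders you would replace to match your own depth units and calibration.

import numpy as np
import open3d as o3d

def get_colored_point_cloud_o3d(rgb, depth, fx, fy, cx, cy):
    rows, cols = depth.shape
    color_img = o3d.geometry.Image(np.ascontiguousarray(rgb).astype(np.uint8))
    depth_img = o3d.geometry.Image(depth.astype(np.float32))
    rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
        color_img, depth_img,
        depth_scale=1.0,    # assumes depth is already in meters
        depth_trunc=100.0,  # assumed far-clipping distance
        convert_rgb_to_intensity=False)
    intrinsic = o3d.camera.PinholeCameraIntrinsic(cols, rows, fx, fy, cx, cy)
    return o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, intrinsic)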
The more direct way of doing this, instead of the somewhat ugly meshgrid approach (at least the way I have written it), is to build separate lists of (col_index, row_index, z) points and (R, G, B) colors, transforming each (col_index, row_index, z) to (x, y, z) in an unrolled loop, one point at a time. But this is much slower, since it does not use numpy's vectorization magic under the hood.
def get_colored_point_cloud(calib, rgb, depth):
    points = []
    colors = []
    rows, cols = depth.shape
    for i in range(rows):
        for j in range(cols):
            z = depth[i, j]
            r = rgb[i, j, 0]
            g = rgb[i, j, 1]
            b = rgb[i, j, 2]
            points.append([j, i, z])  # [u, v, z] with u = col, v = row
            colors.append([r, g, b])
    points = np.asarray(points)
    colors = np.asarray(colors)
    cloud = unproject_image_to_point_cloud(points,
                                           calib.intrinsic_params)  # -> (num_points, 3)
    return cloud, colors  # (num_points, 3), (num_points, 3)
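Either way, once you have cloud and colors as numpy arrays, a quick way to eyeball the result is Open3D's viewer (assuming the colors are 8-bit, hence the division by 255):

import numpy as np
import open3d as o3d

cloud, colors = get_colored_point_cloud(calib, rgb, depth)

pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np.asarray(cloud, dtype=np.float64))
pcd.colors = o3d.utility.Vector3dVector(np.asarray(colors, dtype=np.float64) / 255.0)
o3d.visualization.draw_geometries([pcd])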