I'm trying to get depth from Blender's depth (Z) pass instead of cv2.StereoBM_create().compute(), and the resulting depth map looks much better. However, when I convert it to 3D points with a new function (depth_to_3d), the points do not match the coordinates of the objects in my Blender scene. These two sets of coordinates need to agree before I can move on to the next step. My goal is to detect some boxes that are animated in Blender, each moving along the X and Y axes, and finally identify each of them with a particular ID. I previously used OpenCV stereo vision for this, and for camera calibration I adapted the Blender camera calibration script from this GitHub link:
https://github.com/ynyBonfennil/Blender-Camera-Calibration/blob/main/camera_calibration.py
Cheers
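For reference, the ground-truth locations I want the reconstructed points to match come straight from the scene (a minimal sketch; the 'Box' name prefix is just an assumption about my naming):

import bpy

def get_box_locations(frame, name_prefix='Box'):
    scene = bpy.context.scene
    scene.frame_set(frame)  # evaluate the animation at this frame
    locations = {}
    for obj in scene.objects:
        if obj.name.startswith(name_prefix):
            # matrix_world.translation is the animated world-space position
            locations[obj.name] = tuple(obj.matrix_world.translation)
    return locations

print(get_box_locations(bpy.context.scene.frame_current))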
import bpy
import cv2
import numpy as np
import matplotlib.pyplot as plt

cam = bpy.data.objects.get('Camera')
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
# Find the existing depth output node, or create it and connect the Z pass
render_layers = tree.nodes.get('Render Layers')
depth_output = tree.nodes.get('Depth Output')
if not depth_output:
    depth_output = tree.nodes.new(type="CompositorNodeOutputFile")
    depth_output.name = 'Depth Output'
    links.new(render_layers.outputs['Depth'], depth_output.inputs[0])
depth_output.format.file_format = 'TIFF'
depth_output.base_path = output_dir  # output_dir is defined earlier in my script
depth_output.file_slots[0].path = "depth"
# Set render settings for the color image
color_image_output = tree.nodes.get('Color Output')
if not color_image_output:
    color_image_output = tree.nodes.new(type="CompositorNodeOutputFile")
    color_image_output.name = 'Color Output'
    links.new(render_layers.outputs['Image'], color_image_output.inputs[0])
color_image_output.format.file_format = 'JPEG'
color_image_output.base_path = output_dir
color_image_output.file_slots[0].path = "color"
bpy.ops.render.render(write_still=True)
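The File Output node appends the zero-padded frame number to each slot name, so the rendered files land at paths like the ones below (a sketch of how I build them; the 4-digit suffix and these extensions are Blender's default naming):

import os

frame = bpy.context.scene.frame_current
# File Output slots are written as "<slot path><frame:04d>.<ext>" by default
depth_image_path = os.path.join(output_dir, "depth{:04d}.tif".format(frame))
color_image_path = os.path.join(output_dir, "color{:04d}.jpg".format(frame))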
depth_image = cv2.imread(depth_image_path, cv2.IMREAD_ANYDEPTH)
depth_image_inverted = cv2.bitwise_not(depth_image) / 1000
# Visualize the inverted depth map
plt.imshow(depth_image_inverted, cmap='CMRmap_r')
plt.title('Inverted Depth Map')
plt.colorbar()
plt.show()
img_color = cv2.imread(color_image_path)
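Note that an 8/16-bit TIFF quantizes the Z pass, and bitwise_not only flips the integer range. As an alternative sketch (assuming the File Output node is switched to OPEN_EXR), the raw float depth in scene units can be read directly, with no inversion or /1000 rescaling:

# Assumption: depth_output.format.file_format = 'OPEN_EXR' above, which stores
# the raw Z pass as float32 in Blender scene units.
# Some OpenCV builds also need os.environ['OPENCV_IO_ENABLE_OPENEXR'] = '1'
# set before cv2 is first imported.
depth_exr = cv2.imread(os.path.join(output_dir, "depth{:04d}.exr".format(frame)),
                       cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
if depth_exr is not None and depth_exr.ndim == 3:
    depth_exr = depth_exr[..., 0]  # every channel carries the same Z value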
################################### Camera calibration #################################
def get_calibration_matrix_K_from_blender(camd):
    scene = bpy.context.scene
    resolution_x_in_px = scene.render.resolution_x
    resolution_y_in_px = scene.render.resolution_y
    f_in_mm = camd.lens
    sensor_width_in_mm = camd.sensor_width
    sensor_height_in_mm = camd.sensor_height
    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
    # pixels per millimetre on the sensor
    if camd.sensor_fit == 'VERTICAL':
        s_u = resolution_y_in_px / sensor_height_in_mm
    else:
        s_u = resolution_x_in_px / sensor_width_in_mm
    s_v = s_u / pixel_aspect_ratio
    # focal length in pixels
    alpha_u = f_in_mm * s_u
    alpha_v = f_in_mm * s_v
    u_0 = resolution_x_in_px / 2
    v_0 = resolution_y_in_px / 2
    K = np.array([[alpha_u, 0,       u_0],
                  [0,       alpha_v, v_0],
                  [0,       0,       1]], dtype=np.float32)
    return K
K = get_calibration_matrix_K_from_blender(cam.data)
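As a quick sanity check on K (a worked example under assumed settings: 50 mm lens, 36 mm sensor width, 1920x1080 render with square pixels), the focal length in pixels should come out to about 50 * 1920 / 36 ≈ 2666.7:

print(K)
# Expected with the assumed defaults:
# [[2666.7    0.   960. ]
#  [   0.  2666.7  540. ]
#  [   0.     0.     1. ]]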
##################################### Depth to 3D conversion ###################################################
def depth_to_3d(depth_map, K):
    # Back-project each pixel (u, v, 1) through K^-1 and scale by its depth
    h, w = depth_map.shape
    i, j = np.meshgrid(np.arange(w), np.arange(h), indexing='xy')
    uv_homogeneous = np.stack((i, j, np.ones_like(i)), axis=-1)
    K_inv = np.linalg.inv(K)
    xyz = np.dot(uv_homogeneous, K_inv.T) * depth_map[..., None]
    return xyz
points_3d = depth_to_3d(depth_image, K)
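These points live in OpenCV's camera frame (+Z forward, +Y down), while a Blender camera looks down its local -Z axis with +Y up, so they will not match object coordinates until they are moved into world space. A minimal sketch of that conversion (assuming depth_image holds metric depth in scene units):

def camera_to_world(points_3d, cam):
    # OpenCV camera frame -> Blender camera frame: flip Y and Z
    pts = points_3d.reshape(-1, 3) * np.array([1.0, -1.0, -1.0])
    # Blender camera frame -> world frame via the camera's pose matrix
    M = np.array(cam.matrix_world)  # 4x4 camera-to-world transform
    pts_h = np.hstack([pts, np.ones((pts.shape[0], 1))])
    return (pts_h @ M.T)[:, :3]

points_world = camera_to_world(points_3d, cam)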
output_file = 'reconstructed_from_depth.ply'
with open(output_file, 'w') as f:
    f.write('ply\n')
    f.write('format ascii 1.0\n')
    f.write('element vertex {}\n'.format(points_3d.shape[0] * points_3d.shape[1]))
    f.write('property float x\n')
    f.write('property float y\n')
    f.write('property float z\n')
    f.write('end_header\n')
    for row in points_3d:
        for point in row:
            f.write('{} {} {}\n'.format(point[0], point[1], point[2]))
print('3D reconstruction saved to', output_file)
print(points_3d.shape)
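Alternatively (a sketch assuming the open3d package is available), the same cloud can be written in one call:

import open3d as o3d

pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points_3d.reshape(-1, 3).astype(np.float64))
o3d.io.write_point_cloud('reconstructed_from_depth.ply', pcd)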
#print(points_3d[0][0][0] > 2.5)
# Ad-hoc rescale: points whose x falls outside [-1, 1] are divided by an
# empirically chosen factor to pull them towards scene scale
for i in range(points_3d.shape[0]):
    for j in range(points_3d.shape[1]):
        if not -1 < points_3d[i][j][0] < 1:
            points_3d[i][j] = points_3d[i][j] / 47500
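To check whether a blanket factor like this is actually needed, the depth at a box's pixel can be compared against the true camera-to-box distance in the scene (a sketch; 'Box' is an assumed object name):

box = bpy.data.objects.get('Box')  # assumed object name
if box is not None:
    true_dist = (cam.matrix_world.translation - box.matrix_world.translation).length
    print('True camera-to-box distance:', true_dist)
    # compare with the value sampled from depth_image at the box's pixel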
##################################### Reprojection of 3D points to 2D using cv2.projectPoints ###############################################
def reproject_points_to_2d(points_3d, camera_matrix, dist_coeffs, rvec, tvec):
    points_2d, _ = cv2.projectPoints(points_3d, rvec, tvec, camera_matrix, dist_coeffs)
    return points_2d
# Assuming no lens distortion
dist_coeffs = np.zeros((4, 1))
# Camera rotation and translation vectors (assuming no rotation and at origin for simplicity)
rvec = np.zeros((3, 1))
tvec = np.zeros((3, 1))
# Flatten the points_3d array for projection
points_3d_flat = points_3d.reshape(-1, 3)
# Reproject the 3D points to 2D
projected_points_2d = reproject_points_to_2d(points_3d_flat, K, dist_coeffs, rvec, tvec)
# Reshape the projected 2D points to match image dimensions
projected_points_2d = projected_points_2d.reshape(depth_image.shape[0], depth_image.shape[1], 2)
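Because the points were back-projected with this same K and an identity pose, reprojecting them should land every point back on its own pixel (dividing some points by 47500 does not change their projection, since projection is invariant to scale along the ray). A quick consistency check:

u, v = np.meshgrid(np.arange(depth_image.shape[1]), np.arange(depth_image.shape[0]))
expected = np.stack([u, v], axis=-1).astype(np.float64)
err = np.linalg.norm(projected_points_2d - expected, axis=-1)
print('Mean reprojection error (px):', np.nanmean(err))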
# Create a blank image to visualize the reprojected points
blank_img = np.zeros((depth_image.shape[0], depth_image.shape[1], 3), dtype=np.uint8)
# Visualize the reprojected points on the blank image
for point in projected_points_2d.reshape(-1, 2):
    cv2.circle(blank_img, (int(point[0]), int(point[1])), 1, (255, 0, 0), -1)
cv2.imwrite("C:/tmp/reprojected_2D_image.jpg", blank_img)
plt.imshow(cv2.cvtColor(blank_img, cv2.COLOR_BGR2RGB))
plt.title('Reprojected Points')
plt.show()