I am trying to stitch or project three images from three 120 degree FoV cameras that have been aligned to perfectly cover a 360 degree FoV. They have no overlap so the common stitching techniques using opencv’s Stitcher class will not work as relies on overlapping and detecting matching orb points between the images. Basic concatenation does not work either as they need to be warped or projected on to a cylinder in order to get the correct 360 degree image. I am using the CARLA Simulator and Python library.
Camera setup code:
def create_camera_setup(world, blueprint_library, vehicle):
# Create a camera blueprint
camera_bp = blueprint_library.find('sensor.camera.rgb')
camera_bp.set_attribute('fov', '120')
camera_bp.set_attribute('image_size_x', '1024')
camera_bp.set_attribute('image_size_y', '728')
# Set up forward-facing cameras
camera_left = spawn_camera(world, camera_bp, vehicle, carla.Transform(
carla.Location(x=0.2, y=-0.3, z=1.3),
carla.Rotation(pitch=0, yaw=-60, roll=0)
))
camera_right = spawn_camera(world, camera_bp, vehicle, carla.Transform(
carla.Location(x=0.2, y=0.3, z=1.3),
carla.Rotation(pitch=0, yaw=60, roll=0)
))
# Set up rear-facing camera
camera_rear = spawn_camera(world, camera_bp, vehicle, carla.Transform(
carla.Location(x=0.2, y=0, z=1.3),
carla.Rotation(pitch=0, yaw=180, roll=0)
))
# Set up IMU
imu_bp = blueprint_library.find('sensor.other.imu')
imu = world.spawn_actor(imu_bp, carla.Transform(carla.Location(x=1.6, y=0, z=1.7)), attach_to=vehicle)
# Set up GPS
gps_bp = blueprint_library.find('sensor.other.gnss')
gps = world.spawn_actor(gps_bp, carla.Transform(carla.Location(x=1.6, y=0, z=1.7)), attach_to=vehicle)
return {
'camera_left': camera_left,
'camera_right': camera_right,
'camera_rear': camera_rear,
'imu': imu,
'gps': gps
}
Current simple concatenation of the images:
def create_360_view(camera_left, camera_right, camera_rear):
left_img = camera_left.get_data()
right_img = camera_right.get_data()
rear_img = camera_rear.get_data()
combined_img = np.concatenate((left_img, right_img, rear_img), axis=1)
return combined_img
def display_image(display, image):
surface = pygame.surfarray.make_surface(image.swapaxes(0, 1))
display.blit(surface, (0, 0))
pygame.display.flip()
Camera Aspects: 120 degree fov, 1024×728
Resulting image:
https://ibb.co/FnFhBcp
I have tried some transformation work as seen below but the images have gaps between them when projected:
def create_360_view(camera_left, camera_right, camera_rear):
left_img = camera_left.get_data()
right_img = camera_right.get_data()
rear_img = camera_rear.get_data()
height, width = left_img.shape[:2]
panorama_width = width * 3
panorama = np.zeros((height, panorama_width, 3), dtype=np.uint8)
place_image_cylindrical(panorama, left_img, 0, width)
place_image_cylindrical(panorama, right_img, width, width)
place_image_cylindrical(panorama, rear_img, 2*width, width)
return panorama
def place_image_cylindrical(panorama, img, start_x, width):
height, img_width = img.shape[:2]
cylinder_radius = width / (2 * np.pi / 3) # 120 degrees = 2π/3 radians
theta = np.linspace(-np.pi/3, np.pi/3, width)
h = np.arange(height) - height // 2
theta, h = np.meshgrid(theta, h)
x = np.round(cylinder_radius * np.tan(theta) + img_width / 2).astype(int)
y = np.round(h * np.cos(theta) + height / 2).astype(int)
mask = (x >= 0) & (x < img_width) & (y >= 0) & (y < height)
panorama_x = np.round(theta * width / (2*np.pi/3) + start_x).astype(int)
panorama_y = h + height // 2
valid_mask = mask & (panorama_x >= 0) & (panorama_x < panorama.shape[1]) & (panorama_y >= 0) & (panorama_y < panorama.shape[0])
panorama[panorama_y[valid_mask], panorama_x[valid_mask]] = img[y[valid_mask], x[valid_mask]]
Resulting image:
https://ibb.co/Mk2FXtV
Georgino Saramello is a new contributor to this site. Take care in asking for clarification, commenting, and answering.
Check out our Code of Conduct.
12