I have been trying to create smooth interpolated images between two images (they might be slightly rotated, translated and/or zoomed relative to each other), and I came up with this function:
def interpolate_images(image1, image2, n_interpolations):
    """Warp image1 progressively toward image2.

    Detects ORB features in both images, estimates a homography between
    them with RANSAC, and returns n_interpolations + 1 frames in which
    image1 is warped by a linear blend between the identity matrix and
    the homography.

    Args:
        image1: BGR source image (numpy array, as read by cv2.imread).
        image2: BGR target image of the same scene.
        n_interpolations: number of interpolation steps; the returned
            list has n_interpolations + 1 frames. Must be >= 1.

    Returns:
        List of BGR frames; the first frame (alpha = 0) is image1 unchanged.
    """
    # Convert to grayscale for feature detection.
    gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    # Detect ORB keypoints and descriptors. The detector must be created
    # here -- `orb` was an undefined name in the original snippet.
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(gray1, None)
    kp2, des2 = orb.detectAndCompute(gray2, None)
    # Match features with a brute-force matcher. ORB descriptors are
    # binary, so Hamming distance is the appropriate norm.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    # Extract the matched keypoint locations.
    src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    # Homography estimated from the matches; RANSAC (5 px reprojection
    # threshold) rejects outlier matches.
    M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
    # Identity matrix corresponds to the untouched first image.
    identity_matrix = np.eye(3)
    # Output size of the warped frames; h and w were previously undefined.
    h, w = image1.shape[:2]
    # Accumulator for the generated frames; was previously undefined.
    interpolated_images = []
    # Interpolation between identity matrix and homography.
    for i in range(n_interpolations + 1):
        alpha = i / n_interpolations
        # NOTE(review): element-wise matrix interpolation is only an
        # approximation; for pure rotation/translation/zoom, interpolating
        # the parameters of a similarity transform is smoother.
        interpolated_matrix = (1 - alpha) * identity_matrix + alpha * M
        # Warp the first image with the interpolated matrix.
        warped_image1 = cv2.warpPerspective(image1, interpolated_matrix, (w, h))
        interpolated_images.append(warped_image1)
    return interpolated_images
def interpolate_images(image1, image2, n_interpolations):
    """Return a smooth sequence of frames morphing image1 into image2.

    Detects ORB features in both images, estimates a homography M with
    RANSAC, and for each step geometrically interpolates between the
    identity and M. Both images are warped into the same intermediate
    frame and cross-dissolved, so the first frame is exactly image1 and
    the last frame is exactly image2 -- this removes the rough cut the
    plain "warp image1 only" approach produces at the end.

    Args:
        image1: BGR source image (numpy array, as read by cv2.imread).
        image2: BGR target image of the same scene.
        n_interpolations: number of interpolation steps; the returned
            list has n_interpolations + 1 frames.

    Returns:
        List of BGR frames from image1 (alpha = 0) to image2 (alpha = 1).
    """
    # Convert to grayscale for feature detection.
    gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    # Detect ORB keypoints and descriptors.
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(gray1, None)
    kp2, des2 = orb.detectAndCompute(gray2, None)
    # Match features using a brute-force Hamming matcher (ORB descriptors
    # are binary), best matches first.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    # Extract location of the matched keypoints.
    src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    # Homography relating the two point sets (RANSAC, 5 px threshold).
    M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
    # Inverse homography, used to place image2 into each intermediate frame.
    M_inv = np.linalg.inv(M)
    identity_matrix = np.eye(3)
    h, w = image1.shape[:2]
    interpolated_images = []
    for i in range(n_interpolations + 1):
        # Guard against ZeroDivisionError when n_interpolations == 0:
        # a single frame then gets alpha = 1 (the target image).
        alpha = i / n_interpolations if n_interpolations else 1.0
        # Geometric interpolation between identity and the homography.
        # NOTE(review): element-wise blending is an approximation; for pure
        # rotation/translation/zoom, interpolating similarity-transform
        # parameters (angle, scale, shift) is even smoother.
        A = (1 - alpha) * identity_matrix + alpha * M
        # Warp image1 forward into the intermediate frame.
        warp1 = cv2.warpPerspective(image1, A, (w, h))
        # Warp image2 into the same intermediate frame: image2 is related
        # to image1 by M, so A @ M_inv maps image2 there. At alpha = 1
        # this is the identity, i.e. warp2 == image2 exactly.
        warp2 = cv2.warpPerspective(image2, A @ M_inv, (w, h))
        # Cross-dissolve the two aligned warps; this makes both endpoints
        # exact and hides brightness/content differences in between.
        frame = cv2.addWeighted(warp1, 1.0 - alpha, warp2, alpha, 0)
        interpolated_images.append(frame)
    return interpolated_images
def interpolate_images(image1, image2, n_interpolations):
    """Warp image1 step by step toward image2.

    ORB features are matched between the two images, a homography is
    fitted with RANSAC, and image1 is warped by a linear blend between
    the identity matrix and that homography for each interpolation step.

    Args:
        image1: BGR source image (numpy array, as read by cv2.imread).
        image2: BGR target image of the same scene.
        n_interpolations: number of steps; n_interpolations + 1 frames
            are returned.

    Returns:
        List of BGR frames, starting with image1 unchanged.
    """
    # Feature detection operates on grayscale copies.
    grey_a = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    grey_b = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    # ORB keypoints and binary descriptors for both images.
    detector = cv2.ORB_create()
    keypoints_a, descriptors_a = detector.detectAndCompute(grey_a, None)
    keypoints_b, descriptors_b = detector.detectAndCompute(grey_b, None)
    # Brute-force matching with Hamming distance, best matches first.
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    pairs = sorted(matcher.match(descriptors_a, descriptors_b),
                   key=lambda p: p.distance)
    # Coordinates of the matched keypoints in each image.
    points_a = np.float32([keypoints_a[p.queryIdx].pt for p in pairs]).reshape(-1, 1, 2)
    points_b = np.float32([keypoints_b[p.trainIdx].pt for p in pairs]).reshape(-1, 1, 2)
    # Robust homography fit (RANSAC, 5 px reprojection threshold).
    homography, _inlier_mask = cv2.findHomography(points_b, points_a,
                                                  cv2.RANSAC, 5.0)
    eye = np.eye(3)
    height, width = image1.shape[:2]
    frames = []
    for step in range(n_interpolations + 1):
        weight = step / n_interpolations
        # Blend between "no transform" and the full homography.
        blended = (1 - weight) * eye + weight * homography
        frames.append(cv2.warpPerspective(image1, blended, (width, height)))
    return frames
However, when I run it with two very similar images (example images attached) together with the following code:
# Load the two endpoint images (BGR arrays; cv2.imread returns None on failure).
img1 = cv2.imread("image1.jpg")
img2 = cv2.imread("image2.jpg")
# Number of in-between steps; `n` was undefined in the original snippet,
# which raised a NameError on the interpolate_images call below.
n = 4
interpolated_frames = interpolate_images(img1, img2, n)
# Write frames interp_0 .. interp_n, then the real second image as the
# final frame of the sequence.
for index, image in enumerate(interpolated_frames):
    cv2.imwrite(f"interp_{index}.jpg", image)
cv2.imwrite(f"interp_{n+1}.jpg", img2)
# Read both endpoint images from disk as BGR arrays.
img1 = cv2.imread("image1.jpg")
img2 = cv2.imread("image2.jpg")

# Interpolate frames: n steps yield n + 1 generated images.
n = 4
interpolated_frames = interpolate_images(img1, img2, n)

# Dump every generated frame, then append the untouched target image
# as the last file of the sequence.
for index, frame in enumerate(interpolated_frames):
    cv2.imwrite(f"interp_{index}.jpg", frame)
cv2.imwrite(f"interp_{n+1}.jpg", img2)
# The two images to interpolate between.
img1 = cv2.imread("image1.jpg")
img2 = cv2.imread("image2.jpg")

# Interpolate frames
n = 4

# Save each generated frame under a sequential name, iterating the
# result of interpolate_images directly.
for index, image in enumerate(interpolate_images(img1, img2, n)):
    cv2.imwrite(f"interp_{index}.jpg", image)
# The real second image becomes the final frame of the sequence.
cv2.imwrite(f"interp_{n+1}.jpg", img2)
there is still a rough cut between the last interpolated image (`interp_4.jpg`) and the actual second image (`image2.jpg`, saved as `interp_5.jpg`).
How can this be fixed, so that I get a smooth transition interpolation between the two images `image1` and `image2`?
Ideally the interpolated images should only show a combination of rotation, translation and zooming between `image1` and `image2`.
Also, the solution does not have to use OpenCV — a different library would be fine as well.