I attempted to stitch multiple images together using the following lines of OpenCV code, but it failed:
import cv2

imageStitcher = cv2.Stitcher.create()
error, stitched_img = imageStitcher.stitch(images)
# error comes back as 1 (cv2.Stitcher_ERR_NEED_MORE_IMGS)
From what I found, error code 1 (cv2.Stitcher_ERR_NEED_MORE_IMGS) means the stitcher could not find enough matching detail between the images to build the panorama.
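For reference, here is a minimal sketch of how I decode the status code (the constant names come from cv2.Stitcher) and of a second attempt with SCANS mode, which as far as I know assumes a flatter, scan-like motion model than the default PANORAMA mode:

import cv2

status_names = {
    cv2.Stitcher_OK: "OK",
    cv2.Stitcher_ERR_NEED_MORE_IMGS: "ERR_NEED_MORE_IMGS",
    cv2.Stitcher_ERR_HOMOGRAPHY_EST_FAIL: "ERR_HOMOGRAPHY_EST_FAIL",
    cv2.Stitcher_ERR_CAMERA_PARAMS_ADJUST_FAIL: "ERR_CAMERA_PARAMS_ADJUST_FAIL",
}

# SCANS mode uses an affine model suited to flat, translated captures
imageStitcher = cv2.Stitcher.create(cv2.Stitcher_SCANS)
error, stitched_img = imageStitcher.stitch(images)
print(status_names.get(error, "unknown status"))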
If you have a better or simpler way to stitch these images, I would love to see it, as this needs to run over multiple folders and samples.
I tried FIJI's stitcher and it does work, which leaves me with the question of how to make this work in OpenCV.
I knew some preprocessing was needed to make it work (brightness, contrast, CLAHE, histograms, and cropping adjustments), but I still got the same error message.
I wrote code for it and got to the point where I could stitch the first two images (1-L and 2-L), but the perspective is destroyed afterwards with 3-L. I don't really understand why this happens, even though I can see that the keypoints and features are being recognized well in each image (using SIFT and KNN, in an Anaconda environment with Spyder).
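My current assumption about the perspective drift: re-detecting features on the already-warped (and cropped) result compounds the distortion, whereas chaining the pairwise homographies into a single reference frame would warp each original image exactly once. A minimal sketch with hypothetical matrices H_12 (image 1 into image 2's frame) and H_23 (image 2 into image 3's frame):

import numpy as np

# Composing the pairwise homographies maps image 1 straight into
# image 3's frame; each source image is then warped only once.
H_13 = H_23 @ H_12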
from pathlib import Path
from rembg import remove
from matplotlib import pyplot as plt
import numpy as np
import os
import cv2
#--------------------------------------------------------------------------------------------------------------------
"""Functions needed for stitching"""
def select_descriptor_method(img, method=None):
    """Create a feature descriptor and apply it to an image.

    Takes an image as input and returns its keypoints and their feature
    descriptors, depending on the method selected.
    """
    assert method is not None, "Please define a descriptor method. Accepted values are: 'sift', 'surf', 'orb', 'brisk'"
    if method == 'sift':
        descriptor = cv2.SIFT_create()
    if method == 'surf':
        # SURF is patented: it lives in opencv-contrib and needs a build
        # with the nonfree modules enabled
        descriptor = cv2.xfeatures2d.SURF_create()
    if method == 'brisk':
        descriptor = cv2.BRISK_create()
    if method == 'orb':
        descriptor = cv2.ORB_create()
    (keypoints, features) = descriptor.detectAndCompute(img, None)
    return (keypoints, features)
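# A quick usage sketch, just to show the return values (commented out
# because images_grey is only loaded further below by load_images):
# key, feat = select_descriptor_method(images_grey[0], method='sift')
# print(len(key), 'keypoints,', feat.shape, 'descriptor matrix')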
#--------------------------------------------------------------------------------------------------------------------
def create_matching_object(method, crossCheck):
    "Create the brute-force matcher with the norm that fits the method."
    if method == 'sift' or method == 'surf':
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=crossCheck)
    elif method == 'orb' or method == 'brisk':
        # binary descriptors are compared with the Hamming distance
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=crossCheck)
    return bf
#--------------------------------------------------------------------------------------------------------------------
def key_points_matching(feat1, feat2, method):
    "Match the keypoints by brute force."
    bf = create_matching_object(method, crossCheck=True)
    best_matches = bf.match(feat1, feat2)
    raw_matches = sorted(best_matches, key=lambda x: x.distance)
    print('Raw matches with brute force:', len(raw_matches))
    return raw_matches
#--------------------------------------------------------------------------------------------------------------------
def key_points_matching_KNN(feat1, feat2, key1, key2, ratio, method):
    "Match the keypoints by KNN with Lowe's ratio test and a slope filter."
    bf = create_matching_object(method, crossCheck=False)
    raw_matches = bf.knnMatch(feat1, feat2, k=2)
    print('Raw matches with knn:', len(raw_matches))
    knn_matches = []
    for pair in raw_matches:
        # knnMatch can return fewer than k neighbours for some queries
        if len(pair) < 2:
            continue
        m, n = pair
        pt1 = key1[m.queryIdx].pt
        pt2 = key2[m.trainIdx].pt
        slope = calculate_slope(pt1, pt2)
        # extra proximity condition: the images are known to lie on the
        # same plane (almost straight), so matches with high slopes are
        # rejected to keep them from crossing
        if m.distance < n.distance * ratio and abs(slope) <= max_slope:
            knn_matches.append(m)
    return knn_matches
#--------------------------------------------------------------------------------------------------------------------
def calculate_slope(pt1, pt2):
    """
    Calculates the slope between two points.
    Args:
        pt1 (tuple): Coordinates of the first point (x, y).
        pt2 (tuple): Coordinates of the second point (x, y).
    Returns:
        float: The slope between the two points.
    """
    if pt1[0] == pt2[0]:  # Avoid division by zero
        return float('inf')  # Slope is infinite if the x-coordinates are equal
    else:
        return (pt2[1] - pt1[1]) / (pt2[0] - pt1[0])
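# A quick sanity check with made-up points: a nearly horizontal match
# passes the max_slope = 0.15 filter used above, a steep one does not:
# calculate_slope((100, 200), (300, 210))  # 0.05 -> kept
# calculate_slope((100, 200), (300, 300))  # 0.5  -> rejected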
#--------------------------------------------------------------------------------------------------------------------
def homography_stitching(key1, key2, matches, reprojThresh):
    "Estimate the homography between the matched keypoints with RANSAC."
    key1 = np.float32([keypoint.pt for keypoint in key1])
    key2 = np.float32([keypoint.pt for keypoint in key2])
    if len(matches) > 4:
        points_img1 = np.float32([key1[m.queryIdx] for m in matches])
        points_img2 = np.float32([key2[m.trainIdx] for m in matches])
        (H, status) = cv2.findHomography(points_img1,
                                         points_img2,
                                         cv2.RANSAC,
                                         reprojThresh)
        return (matches, H, status)
    else:
        return None
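# A minimal usage sketch of what I expect from one pair (an assumption:
# since points_img1 are the source points, H maps img1's coordinates
# into img2's frame, so img1 is the image to warp):
# M = homography_stitching(key1, key2, matches, reprojThresh=4)
# if M is not None:
#     matches, H, status = M
#     width = img1.shape[1] + img2.shape[1]
#     height = max(img1.shape[0], img2.shape[0])
#     pano = cv2.warpPerspective(img1, H, (width, height))
#     pano[0:img2.shape[0], 0:img2.shape[1]] = img2  # paste the anchor image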
#--------------------------------------------------------------------------------------------------------------------
def plot_stitching_points(img1, img2, key):
    "Plot one of the keypoint pairs to see the distribution."
    key1, key2 = key
    fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10), constrained_layout=False)  # Adjust figsize for desired output size
    # Flatten the 2D array of subplots into a 1D list for easier indexing
    axes_flat = axes.flatten()
    axes_flat[0].imshow(cv2.drawKeypoints(img2,
                                          key2,
                                          None,
                                          color=(0, 255, 0)))
    axes_flat[0].set_xlabel('[a]', fontsize=14)
    # check whether the image is grayscale or colored
    if img1.ndim == 3:
        # colored (3 or 4 channels)
        Height, Width, channels = img1.shape
    else:
        # grayscale
        Height, Width = img1.shape
    shift_amount = int(Width * 0.5)  # shift keypoints left by half the width on the X-axis
    keypoints_array = np.array([kp.pt for kp in key2], dtype=np.float32)
    keypoints_array[:, 0] -= shift_amount
    # Rebuild the keypoint list with the shifted coordinates
    key2_t = [cv2.KeyPoint(x, y, size=1) for x, y in keypoints_array]
    axes_flat[1].imshow(cv2.drawKeypoints(img2[:, shift_amount:],
                                          key2_t,
                                          None,
                                          color=(0, 255, 0)))
    axes_flat[1].set_xlabel('[b]', fontsize=14)
    axes_flat[2].imshow(cv2.drawKeypoints(img1,
                                          key1,
                                          None,
                                          color=(0, 255, 0)))
    axes_flat[2].set_xlabel('[c]', fontsize=14)
    axes_flat[3].imshow(cv2.drawKeypoints(img1[:, :shift_amount],
                                          key1,
                                          None,
                                          color=(0, 255, 0)))
    axes_flat[3].set_xlabel('[d]', fontsize=14)
#--------------------------------------------------------------------------------------------------------------------
"""creating descriptors for each of the
pairs of images to try to match them
(by taking a section of the matching pieces)"""
def create_pair_descriptor(img1, img2, show_descriptors=False):
"liberate space in disc by creating a function that creates pair descriptors "
"cheack if its grey scale or colored"
if img1.shape[-1] == 4:
"colored"
Height, Width, chanels= img1.shape
else:
"grey"
Height, Width = img1.shape
# shift_amount = int(Width*0.5) # Move pixels to the right on the X-axis equal to half of the image width (for better matching)
"keypoints and features extracted from each image"
key1, feature1 = select_descriptor_method(img1, method=feature_extract_algo)
key2, feature2 = select_descriptor_method(img2, method=feature_extract_algo)
if show_descriptors ==True:
"Plotting for one of the keypoints pairs to see the distribution"
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10), constrained_layout=False) # Adjust figsize for desired output size
# Flatten the 2D array of subplots into a 1D list for easier indexing
axes_flat = axes.flatten()
#Simulate some data or image loading (replace with your actual data/image processing)
axes_flat[0].imshow(cv2.drawKeypoints(img2,
key2,
None,
color=(0,255,0))) # Assuming data represents an image (adjust for other data types)
axes_flat[0].set_xlabel('[a]',fontsize=14) # Optional title for each figure
axes_flat[1].imshow(cv2.drawKeypoints(img2[:,1250:],
key2,
None,
color=(0,255,0))) # Assuming data represents an image (adjust for other data types)
axes_flat[1].set_xlabel('[b]',fontsize=14) # Optional title for each figure
axes_flat[2].imshow(cv2.drawKeypoints(img1,
key1,
None,
color=(0,255,0))) # Assuming data represents an image (adjust for other data types)
axes_flat[2].set_xlabel('[c]',fontsize=14) # Optional title for each figure
axes_flat[3].imshow(cv2.drawKeypoints(img1[:,:1250],
key1,
None,
color=(0,255,0))) # Assuming data represents an image (adjust for other data types)
axes_flat[3].set_xlabel('[d]',fontsize=14) # Optional title for each figure
fig.tight_layout()
plt.show()
plt.close()
return key1,key2,feature1,feature2
def matching_images(feat1, feat2, key1, key2, img1, img2):
    "Match the features of an image pair and draw the matches."
    if features2match == 'bf':
        # matching the features with a brute-force approach
        matches = key_points_matching(feat1,
                                      feat2,
                                      method=feature_extract_algo)
        mapped_feature_image = cv2.drawMatches(img1,
                                               key1,
                                               img2,
                                               key2,
                                               matches[:100],
                                               None,
                                               flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    elif features2match == 'knn':
        # matching the features with a KNN approach
        matches = key_points_matching_KNN(feat1,
                                          feat2,
                                          key1,
                                          key2,
                                          ratio=0.76,
                                          method=feature_extract_algo)
        # sample at most 500 matches so the plot stays readable
        sample = np.random.choice(matches, min(500, len(matches)), replace=False)
        mapped_feature_image = cv2.drawMatches(img1,
                                               key1,
                                               img2,
                                               key2,
                                               sample,
                                               None,
                                               flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    return matches, mapped_feature_image
#------------------------------------------------------------------------------------------------------------------------
"image adjusting functions"
def adjust_exposure_clahe(image, clipLimit=2.5, tileGridSize=(8, 8)):
    """
    Adjusts image exposure using Contrast Limited Adaptive Histogram Equalization (CLAHE).
    Args:
        image (np.ndarray): The input image.
        clipLimit (float, optional): Threshold for contrast limiting. Defaults to 2.5.
        tileGridSize (tuple, optional): Size of the grid for local histogram equalization. Defaults to (8, 8).
    Returns:
        np.ndarray: The exposure-corrected image.
    """
    # Drop the alpha channel if present (rembg returns 4-channel images)
    if image.ndim == 3 and image.shape[-1] == 4:
        image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)
    # Convert image to LAB color space (better suited for CLAHE)
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    # Split the LAB image into channels
    l, a, b = cv2.split(lab)
    # Apply CLAHE to the lightness channel
    clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
    cl1 = clahe.apply(l)
    # Merge the processed lightness channel back with the A and B channels
    merged = cv2.merge((cl1, a, b))
    # Convert back to BGR color space
    result = cv2.cvtColor(merged, cv2.COLOR_LAB2BGR)
    return result
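# For grayscale inputs the LAB round trip is unnecessary; as far as I
# know CLAHE can be applied to the single channel directly:
# gray_eq = cv2.createCLAHE(clipLimit=2.5, tileGridSize=(8, 8)).apply(img_grey)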
#--------------------------------------------------------------------------------------------------------------------
def crop_img(img):
    # Check whether the image is grayscale: a 2D shape means grayscale
    if len(img.shape) == 2:
        img_grey = img.copy()
    else:
        # Converting BGR image to grayscale image
        img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # To find the upper threshold, apply Otsu's thresholding
    upper_threshold, thresh_input_image = cv2.threshold(img_grey,
                                                        thresh=0,
                                                        maxval=255,
                                                        type=cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Calculate the lower threshold
    lower_threshold = 0.5 * upper_threshold
    # Apply Canny edge detection
    canny = cv2.Canny(img_grey, lower_threshold, upper_threshold)
    # Find the non-zero points of the Canny output
    pts = np.argwhere(canny > 0)
    if pts.size == 0:
        # no edges detected, return the input unchanged
        return img
    # Find the min and max points
    y1, x1 = pts.min(axis=0)
    y2, x2 = pts.max(axis=0)
    # Crop the ROI from the given image (not the grayscale copy),
    # so color inputs stay colored
    return img[y1:y2, x1:x2]
#--------------------------------------------------------------------------------------------------------------------------------------
def adjust_contrast_brightness(image, alpha=1.0, beta=0.0):
    """
    Adjusts the contrast and brightness of an image.
    Args:
        image (np.ndarray): The input image.
        alpha (float, optional): Contrast adjustment factor. Defaults to 1.0 (no change).
            - Values > 1 increase contrast.
            - Values < 1 decrease contrast.
        beta (float, optional): Brightness adjustment factor. Defaults to 0.0 (no change).
            - Positive values increase brightness.
            - Negative values decrease brightness.
    Returns:
        np.ndarray: The contrast and brightness adjusted image.
    """
    # Convert image to float for the calculations (avoids overflow)
    image = image.astype(np.float32)
    # Apply contrast and brightness adjustments (plain NumPy arithmetic
    # scales every channel, unlike OpenCV's scalar broadcast)
    adjusted_image = image * alpha + beta
    # Clip values to the valid range
    adjusted_image = np.clip(adjusted_image, 0, 255)
    # Convert back to uint8 for display
    adjusted_image = adjusted_image.astype(np.uint8)
    return adjusted_image
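# As far as I know, cv2.convertScaleAbs performs the same
# scale-shift-saturate in a single call, so an equivalent one-liner is:
# adjusted_image = cv2.convertScaleAbs(image, alpha=alpha, beta=beta)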
#--------------------------------------------------------------------------------------------------------------------
def plot_image_histogram(image, colormap='gray'):
    """
    Plots the histogram of an image.
    Args:
        image (np.ndarray): The input image.
        colormap (str, optional): Color of the histogram bars. Defaults to 'gray'.
    """
    # Convert image to grayscale if necessary
    if len(image.shape) == 3:
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray_image = image
    # Calculate the histogram
    hist, bins = np.histogram(gray_image.flatten(), bins=256, range=(0, 256))
    # Create the histogram plot
    plt.figure()
    plt.bar(bins[:-1], hist, color=colormap)
    plt.xlabel("Pixel Intensity")
    plt.ylabel("Number of Pixels")
    plt.title("Histogram of the Image")
    plt.xlim(0, 256)  # Set x-axis limits for clarity
    plt.grid(True)
    plt.show()
    plt.close()
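# np.histogram works fine here; cv2.calcHist is the OpenCV equivalent
# if staying inside one library is preferred:
# hist = cv2.calcHist([gray_image], [0], None, [256], [0, 256]).ravel()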
#---------------------------------------------------------------------------------------------------------------------
"we load each image from the folder and we can apply the particular transformation for view and use of the images"
def load_images(image_dir,show_imgs=False):
"reads the files names on the folder(imgage dir) for loading them after"
image_files =[f for f in os.listdir(image_dir) if os.path.isfile(os.path.join(image_dir, f))]
"create empty arrays for the storage of the images"
images=[]
images_grey=[]
for image in image_files:
"join directories"
path= Path(os.path.join(image_dir,image))
"load"
img=cv2.imread(path)
"color change"
img=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
"background removal to ease the loading and contrast tranformation"
img=remove(img)
"color correction"
img = adjust_exposure_clahe(img)
"contrast and brightnes correction"
img=adjust_contrast_brightness(img,alpha)
"final background removal"
img=remove(img)
"grey scale image version"
img_grey=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
"crop"
img_grey=crop_img(img_grey)
img=crop_img(img)
"add the now transformed images to their storage variables"
images.append(img)
images_grey.append(img_grey)
"to save the images into the same folder"
# output_path = str(path.parent / (path.stem + "-wout-bg.jpg"))"
# cv2.imwrite(output_path,img)
if show_imgs==True:
"plot the images"
"configuration for viewing the arrays (images)"
fig ,ax=plt.subplots(constrained_layout=False,figsize=[10,10])
plt.imshow(img)
plt.show()
return images, images_grey
#--------------------------------------------------------------------------------------------------------------------
""" Starting the main program """
"stitching parameters"
feature_extract_algo='sift'
features2match='knn'
max_slope=0.15
alpha=1.7
"Loading the path to the folder where the images are located"
image_dir =r"D:obstbinocularecc0_0.5sample_2"
"create empty arrays for the storage of the images and their parameters "
images=[]
images_grey=[]
keys=[]
features=[]
"load and finds the ROI and converts the images"
images,images_grey=load_images(image_dir)
"""loop for creating the stitching """
result=[]
for img in range(len(images_grey)):
"making sure that there's no indexing problems"
if img >=(len(images_grey)-1):
break
if img == 0:
img1=images_grey[1]
img2=images_grey[0]
else:
"for stitching the last pair with the new pair"
img1=images_grey[img+1]
img2=result
key1,key2,feat1,feat2=create_pair_descriptor(img1,img2,show_descriptors=True)
matches, img_mapped=matching_images(feat1, feat2, key1, key2, img1, img2)
plt.imshow(img_mapped)
M = homography_stitching(key1, key2, matches, reprojThresh=9)
if M is None:
print("error")
matches, Homography_matrix, status = M
print (Homography_matrix)
width=(img2.shape[1]+img1.shape[1])
height=max(img2.shape[0], img1.shape[1])
result =cv2.warpPerspective(images_grey[img], Homography_matrix, (width,height))
result = crop_img(result)
plt.axis('off')
plt.imshow(result)