I have a variety of annotated images, that I am going to use in order to train my program to count the number of stitches from non annotated images.
Here is a Drive link with the resources: https://drive.google.com/drive/folders/1dtFFr3dxIAoW6Ba3ae7TMWt_AbcggE6S?usp=sharing
import xmltodict
import csv
import cv2
import numpy as np
import os
import argparse
#Initialize the paths
xml_path = 'resources/annotations.xml'
csv_path = 'src/output.csv'
# Function to parse the XML and extract annotations
def parse_annotations(xml_path):
with open(xml_path) as fd:
doc = xmltodict.parse(fd.read())
annotations = []
for image in doc["annotations"]["image"]:
image_data = {'filename': image['@name'], 'stitches': []}
if "polyline" in image:
for pline in image["polyline"]:
if isinstance(pline, dict) and '@label' in pline and pline['@label'] == 'Stitch':
points = [[float(p) for p in point.split(',')] for point in pline['@points'].split(';')]
image_data['stitches'].append(points)
annotations.append(image_data)
return annotations
# Initialize the annotations
annotations = parse_annotations(xml_path)
# Function to count stitches in an image based on annotations
def count_stitches(image_data):
return len(image_data['stitches'])
# Update the annotations with the stitch count
for image_data in annotations:
image_data['n_stitches'] = count_stitches(image_data)
def preprocess_image(image):
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Apply Gaussian blur (L%P)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
return blurred
def segment_image(gray_image):
# Edge detection
edges = cv2.Canny(gray_image, 50, 150)
# Find contours
contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
return contours
def visualize_contours(image, contours):
vis_image = image.copy()
cv2.drawContours(vis_image, contours, -1, (0, 255, 0), 1)
cv2.imshow('Contours', vis_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def count_detected_stitches(contours):
# Simple heuristic for now: assume each contour is a stitch.
# This will need to be replaced with a real classification model later.
return len(contours)
def parse_arguments():
parser = argparse.ArgumentParser(description='Stitch Counter')
parser.add_argument('output_csv', type=str, help='The CSV file to output results')
parser.add_argument('-v', '--visual', action='store_true', help='Visual mode to display images')
parser.add_argument('images', type=str, nargs='+', help='Image filenames')
return parser.parse_args()
#Function to write the results to a CSV file
def write_to_csv(results, csv_path):
with open(csv_path, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['filename', 'n_stitches'])
writer.writerows(results)
def is_image_blurred(image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
variance_of_laplacian = cv2.Laplacian(gray, cv2.CV_64F).var()
# If the variance is below a threshold, it's likely that the image is blurred.
return variance_of_laplacian < 100 # The threshold may need adjustment.
def is_image_brightness_ok(image, threshold_low=50, threshold_high=200):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
brightness = np.mean(gray)
# Check if the brightness is within the acceptable range
return threshold_low < brightness < threshold_high
# Main execution
if __name__ == "__main__":
args = parse_arguments()
annotations = parse_annotations(xml_path)
# Create a dictionary from annotations for easy lookup
annotation_dict = {image_data['filename']: image_data for image_data in annotations}
# If the script is run with the `-v` visual flag or there are image arguments, use the provided images.
# Otherwise, use the annotated images from the XML file.
image_filenames = args.images if args.images or args.visual else [image_data['filename'] for image_data in annotations]
results = []
for filename in image_filenames:
image_path = os.path.join('resources/incision_couples', filename)
image = cv2.imread(image_path)
if image is None:
print(f"Error: Cannot read image {filename}.")
results.append((filename, -1))
continue
elif not is_image_brightness_ok(image) or is_image_blurred(image):
n_stitches = -1 # Image is either too bright/dark or too blurred
print(f"Error: Cannot count stitches in image {filename}")
else:
if filename in annotation_dict:
# Use the stitch count from the annotation
n_stitches = annotation_dict[filename]['n_stitches']
else:
# Calculate the stitch count
try:
if args.visual:
preprocessed_image = preprocess_image(image)
contours = segment_image(preprocessed_image)
visualize_contours(preprocessed_image, contours)
n_stitches = count_stitches(image_data)
else:
n_stitches = count_stitches(image_data)
except Exception as e:
print(f"Error: Cannot count stitches in image {filename}")
n_stitches = -1
results.append((filename, n_stitches))
write_to_csv(results, args.output_csv)
For now, I am trying to count the number of stitches using the .xml file(which contains the annotations). For some reason I cannot do that properly. Could you please help me?
marilena chochlidaki is a new contributor to this site. Take care in asking for clarification, commenting, and answering.
Check out our Code of Conduct.