I am using the YOLOv8n TFLite model to detect objects in images. My process involves loading the model, processing an image, and drawing the bounding boxes around detected objects. The original image and model input dimensions are both 640×640. Despite this, the bounding boxes in the output image are not positioned or scaled correctly.
<code>import numpy as np
import cv2
import tensorflow as tf

COCO_CLASSES = [
    "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat",
    "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
    "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack",
    "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball",
    "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket",
    "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
    "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair",
    "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote",
    "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book",
    "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"
]

def load_model(model_path):
    interpreter = tf.lite.Interpreter(model_path=model_path)
    interpreter.allocate_tensors()
    print("Model loaded and tensors allocated.")
    return interpreter

def non_max_suppression(boxes, scores, iou_threshold=0.5):
    indices = cv2.dnn.NMSBoxes(boxes, scores, score_threshold=0.25, nms_threshold=iou_threshold)
    # Check if indices is empty
    if len(indices) > 0:
        indices = indices.flatten()
    return indices

def detect_objects(interpreter, image_path, conf_threshold=0.25, iou_threshold=0.5):
    image = cv2.imread(image_path)
    if image is None:
        raise ValueError("Unable to read the image.")
    print(f"Image {image_path} loaded successfully.")
    original_height, original_width = image.shape[:2]
    print(f"Original image dimensions: width={original_width}, height={original_height}")

    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    input_shape = input_details[0]['shape']
    input_height, input_width = input_shape[1], input_shape[2]
    print(f"Model input dimensions: width={input_width}, height={input_height}")

    # Resize and normalize image
    resized_image = cv2.resize(image, (input_width, input_height), interpolation=cv2.INTER_AREA)
    input_data = np.expand_dims(resized_image.astype(np.float32) / 255.0, axis=0)
    print(f"Input data shape: {input_data.shape}")

    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    output = interpreter.get_tensor(output_details[0]['index'])[0].T

    boxes, scores = output[:, :4], output[:, 4:]
    max_scores = np.max(scores, axis=1)
    above_threshold = max_scores > conf_threshold
    valid_boxes = boxes[above_threshold]
    valid_scores = max_scores[above_threshold]
    valid_classes = np.argmax(scores[above_threshold], axis=1)
    print(f"Number of valid detections: {len(valid_boxes)}")

    # Scale boxes to original image size
    valid_boxes[:, [0, 2]] *= original_width   # x_min and x_max
    valid_boxes[:, [1, 3]] *= original_height  # y_min and y_max
    print(f"Valid detections after scaling: {valid_boxes}")

    boxes_for_nms = valid_boxes.tolist()
    scores_for_nms = valid_scores.tolist()
    indices = non_max_suppression(boxes_for_nms, scores_for_nms, iou_threshold)
    print(f"Number of valid detections after NMS: {len(indices)}")

    for idx in indices:
        box = valid_boxes[idx]
        score = valid_scores[idx]
        class_id = valid_classes[idx]
        x1, y1, x2, y2 = map(int, box)
        class_name = COCO_CLASSES[class_id]
        print(f"Detection {idx+1}: Class ID: {class_id}, Class Name: {class_name}, Score: {score:.2f}, Bounding Box: ({x1}, {y1}, {x2}, {y2})")
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
        label = f"{class_name}: {score:.2f}"
        label_size, base_line = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        cv2.rectangle(image, (x1, y1 - label_size[1]), (x1 + label_size[0], y1), (255, 255, 255), cv2.FILLED)
        cv2.putText(image, label, (x1, y1 - base_line), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)

    result_path = 'result.jpg'
    cv2.imwrite(result_path, image)
    print(f"Result image saved to {result_path}")
    return image

def main():
    model_path = 'yolov8n_saved_model/yolov8n_float32.tflite'
    image_path = 'ImageModelSize.jpg'
    interpreter = load_model(model_path)
    detect_objects(interpreter, image_path)

if __name__ == "__main__":
    main()
</code>
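For reference, here is a minimal sanity-check sketch (assuming the same model path as in the script above) that dumps the interpreter's tensor details. With the stock yolov8n TFLite export I would expect an input of shape [1, 640, 640, 3] and a single output of shape [1, 84, 8400]:
<code># Sanity-check sketch: print the interpreter's input/output tensor details.
# Model path assumed to match the script above.
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='yolov8n_saved_model/yolov8n_float32.tflite')
interpreter.allocate_tensors()

for detail in interpreter.get_input_details():
    print("input :", detail['shape'], detail['dtype'])
for detail in interpreter.get_output_details():
    print("output:", detail['shape'], detail['dtype'])
</code>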
TFLite model: the default yolov8n.pt model converted to TFLite.
Observed Behavior:
- The bounding boxes in the output image are misaligned with the detected objects.
- The boxes are also incorrectly sized: neither their position nor their scale matches the objects in the original image.
Expected Behavior:
- Bounding boxes should be accurately aligned with the detected objects in the image.
- The size of the bounding boxes should correspond to the dimensions of the objects.
Images:
- Input image (omitted)
- Output image (omitted)
Questions:
- What might be causing the bounding boxes to be misaligned and incorrectly sized?
- Is there a specific step or method in handling YOLOv8n TFLite outputs that I might be missing or implementing incorrectly?
- How can I ensure the bounding boxes are accurately scaled to the original image dimensions?
Any insights or suggestions would be greatly appreciated!
What I have tried:
- Ensured correct scaling of bounding boxes: multiplied the x_min, y_min, x_max, y_max coordinates by the original image width and height (toy example below).
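In isolation, the scaling step looks like this on a made-up normalized box (the 640x640 values match my image and model input size):
<code># Toy example of the scaling step; the box values are made up.
import numpy as np

original_width, original_height = 640, 640
boxes = np.array([[0.25, 0.40, 0.75, 0.90]], dtype=np.float32)  # normalized coords

boxes[:, [0, 2]] *= original_width   # x coordinates -> pixels
boxes[:, [1, 3]] *= original_height  # y coordinates -> pixels
print(boxes)  # [[160. 256. 480. 576.]]
</code>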
- Applied non-max suppression: used OpenCV's cv2.dnn.NMSBoxes to filter overlapping boxes (standalone example below).
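A minimal standalone example of the NMS call with made-up boxes; worth noting that the OpenCV documentation specifies the box format for cv2.dnn.NMSBoxes as (x, y, width, height):
<code># Toy NMS example: two heavily overlapping boxes plus one separate box.
# Per the OpenCV docs, NMSBoxes expects boxes as (x, y, width, height).
import cv2

boxes = [[100, 100, 50, 80], [104, 102, 50, 80], [300, 300, 40, 40]]
scores = [0.9, 0.8, 0.7]

indices = cv2.dnn.NMSBoxes(boxes, scores, score_threshold=0.25, nms_threshold=0.5)
print(indices)  # box 1 should be suppressed, leaving indices 0 and 2
</code>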
- Checked model output format: verified that the output coordinates are in the format [x_min, y_min, x_max, y_max] and normalized (0 to 1).
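A sketch of such a check (check_box_format is a hypothetical helper; output is the transposed model output from the script above):
<code># Hypothetical helper for eyeballing the box coordinate layout.
import numpy as np

def check_box_format(output):
    """`output` has shape (num_predictions, 4 + num_classes)."""
    coords = output[:, :4]
    print("coordinate value range:", coords.min(), coords.max())
    # In a true [x_min, y_min, x_max, y_max] layout, no row should have
    # x_max < x_min or y_max < y_min.
    bad = np.sum((coords[:, 2] < coords[:, 0]) | (coords[:, 3] < coords[:, 1]))
    print("rows violating min <= max:", bad)
</code>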