I have a mask classification task: predicting whether a person in an image is wearing a mask or not. The model was trained with images loaded like this:
import tensorflow as tf

def preprocess_image(image_path, label):
    print('Preprocessing now')
    # Read and decode the image
    img = tf.io.read_file(image_path)
    img = tf.image.decode_jpeg(img, channels=3)
    # Resize to the model's input size (tf.image.resize returns float32)
    img = tf.image.resize(img, (224, 224))
    # Note: convert_image_dtype only rescales integer inputs; the tensor is
    # already float32 after resize, so this is a no-op and values stay in [0, 255]
    img = tf.image.convert_image_dtype(img, tf.float32)
    return img, label
def create_dataset(dataframe, batch_size, label_encoder):
    # Create a dataset from the dataframe
    image_paths = dataframe['Image_Path'].values
    labels = dataframe['Label'].values
    labels = label_encoder.transform(labels)
    dataset = tf.data.Dataset.from_tensor_slices((image_paths, labels))
    # Map the preprocessing function in parallel
    print('entering mapping')
    dataset = dataset.map(preprocess_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Shuffle and batch the dataset
    print('leaving mapping entering shuffle')
    dataset = dataset.shuffle(buffer_size=6000)
    print('leaving shuffle entering batching')
    dataset = dataset.batch(batch_size)
    print('leaving batching entering prefetching')
    # Prefetch data for better GPU utilization
    dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return dataset
# Example usage:
batch_size = 256
train_dataset1 = create_dataset(TrainData, batch_size, label_encoder)
validation_dataset1 = create_dataset(ValidationData, batch_size, label_encoder)
test_dataset1 = create_dataset(TestData, batch_size, label_encoder)
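To show what this pipeline actually feeds the model, here is a quick sanity check (a minimal sketch using the train_dataset1 defined above) that prints the dtype and pixel-value range of one batch:

# Sanity check: inspect dtype and pixel-value range of one training batch
for images, labels in train_dataset1.take(1):
    print('dtype:', images.dtype)
    print('min:', tf.reduce_min(images).numpy())
    print('max:', tf.reduce_max(images).numpy())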
It reaches 99% training and validation accuracy, but when I run the model for inference with .predict() and load the test images using the same preprocessing code, it returns wrong predictions for real-life images and even for images from the training and validation sets themselves. However, when I load images with the code below, the predictions work perfectly:
from PIL import Image
import tensorflow as tf

def preprocess_image(image_path):
    print('Preprocessing now')
    # Open the image using PIL
    img = Image.open(image_path)
    # Resize the image
    img = img.resize((224, 224))
    # Convert the image to a NumPy array (values stay in [0, 255])
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    # Expand dimensions to match the model's expected input shape
    # (batch_size, height, width, channels)
    img_array = tf.expand_dims(img_array, axis=0)
    # No further normalization is applied before returning
    return img_array
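If the first function is renamed preprocess_image_tf and the second preprocess_image_pil, the two paths can be compared side by side on the same file (a sketch; sample.jpg is a hypothetical example path):

import numpy as np

path = 'sample.jpg'  # hypothetical example path
img_tf, _ = preprocess_image_tf(path, 0)   # tf.io / tf.image path
img_pil = preprocess_image_pil(path)[0]    # PIL path, batch dimension dropped
print('tf  path min/max:', float(tf.reduce_min(img_tf)), float(tf.reduce_max(img_tf)))
print('PIL path min/max:', float(np.min(img_pil)), float(np.max(img_pil)))
print('max abs diff:', float(np.max(np.abs(img_tf.numpy() - img_pil.numpy()))))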
Could someone explain the difference? This problem also affects another project of mine, emotion detection: no matter which image is loaded, the prediction is the same apart from small changes in the decimals, both when run in a Python notebook and when running inference in a Flutter mobile application.
As I said above, I tried the second image-loading method and it worked flawlessly. I want to understand where this problem stems from and how it can be avoided so that inference also works correctly in the Flutter mobile application.
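What I would ideally like is a single preprocessing function shared between training and inference, so both paths are guaranteed to produce identical tensors. Something along these lines (a sketch of the direction, not code I currently have; load_image is a hypothetical name):

import tensorflow as tf

def load_image(image_path):
    # Single source of truth: decode, resize, and keep exactly the
    # dtype/range the model saw at training time
    img = tf.io.read_file(image_path)
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, (224, 224))  # float32
    return img

# Training:  dataset.map(lambda p, l: (load_image(p), l), ...)
# Inference: model.predict(tf.expand_dims(load_image(path), axis=0))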