I have tested with about 20 images: the same ones in Python and the same ones in TensorFlow.js, with extremely similar preprocessing. TensorFlow.js keeps giving me the 0th class.
Here is my Python training code:
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input
import numpy as np
tf.random.set_seed(123)
img_height, img_width = 224, 224
batch_size = 32
data_dir = '/content/drive/MyDrive/AI2/Processed'
# Augmentation: random horizontal flips, rotation, zoom, contrast, and translation
# (note: there is no normalization layer here)
data_augmentation = tf.keras.Sequential([
    layers.experimental.preprocessing.RandomFlip('horizontal'),
    layers.experimental.preprocessing.RandomRotation(0.2),
    layers.experimental.preprocessing.RandomZoom(0.2),
    layers.experimental.preprocessing.RandomContrast(0.2),
    layers.experimental.preprocessing.RandomTranslation(height_factor=(-0.1, 0.1), width_factor=(-0.1, 0.1)),
])
def one_hot_encode(image, label):
    label = tf.one_hot(label, 5)
    return image, label
def main():
    train_ds = tf.keras.preprocessing.image_dataset_from_directory(
        data_dir,
        validation_split=0.2,
        subset="training",
        seed=123,
        image_size=(img_height, img_width),
        batch_size=batch_size,
    )
    val_ds = tf.keras.preprocessing.image_dataset_from_directory(
        data_dir,
        validation_split=0.2,
        subset="validation",
        seed=123,
        image_size=(img_height, img_width),
        batch_size=batch_size,
    )
    class_names = train_ds.class_names
    print(class_names)

    # Count samples per class for the class weights below
    class_count = [0] * len(class_names)
    for images, labels in train_ds.unbatch().as_numpy_iterator():
        class_count[int(labels)] += 1

    # NOTE: no resnet50 preprocess_input is applied anywhere here --
    # the model is trained on raw 0-255 pixels plus the augmentations above
    train_ds = train_ds.map(lambda x, y: (data_augmentation(x, training=True), y))
    train_ds = train_ds.map(one_hot_encode).prefetch(buffer_size=tf.data.AUTOTUNE)
    val_ds = val_ds.map(one_hot_encode).prefetch(buffer_size=tf.data.AUTOTUNE)

    base_model = tf.keras.applications.ResNet50(
        include_top=False,
        weights='imagenet',
        input_shape=(img_height, img_width, 3)
    )
    # Freeze all layers in the base model
    base_model.trainable = False

    # Create the model architecture
    inputs = tf.keras.Input(shape=(img_height, img_width, 3))
    x = base_model(inputs, training=False)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(1024, activation='relu')(x)
    outputs = layers.Dense(len(class_names), activation='softmax')(x)
    model = tf.keras.Model(inputs, outputs)

    # Freeze all layers except the last one
    # for layer in model.layers[:-1]:
    #     layer.trainable = False
    # Unfreeze the last layer in the model
    model.layers[-1].trainable = True

    # Compile the model
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Train the model with inverse-frequency class weights
    class_weights = {i: 1. / count for i, count in enumerate(class_count)}
    history = model.fit(
        train_ds,
        validation_data=val_ds,
        epochs=10,
        class_weight=class_weights
    )

    # Evaluate on the validation set
    loss, accuracy = model.evaluate(val_ds)
    print(f"Validation accuracy: {accuracy * 100}%")

    # Print the model summary and save
    model.summary()
    model.save('/content/drive/MyDrive/AI2/sampleModel.h5')

if __name__ == '__main__':
    main()
I then convert the saved model with:
tensorflowjs_converter --input_format keras /Users/hb/Desktop/tfModel3/sampleModel.h5 /Users/hb/Desktop/tfModel3/sampleModelNew
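One check I can do to separate conversion problems from preprocessing problems is to feed both runtimes the exact same constant tensor and compare the raw outputs. This is just my own sanity-check sketch: parityCheck and the 127.5 probe value are arbitrary, and the model path is the one my React code loads.

import * as tf from '@tensorflow/tfjs';

// Feed the converted model a deterministic input and compare the printed
// probabilities with model.predict(np.ones((1, 224, 224, 3)) * 127.5) on the
// Keras side. If the two outputs disagree, the conversion itself is broken;
// if they agree, the bug is in the image preprocessing.
async function parityCheck() {
  const model = await tf.loadLayersModel('/models/model.json');
  const probe = tf.ones([1, 224, 224, 3]).mul(tf.scalar(127.5)); // constant test input
  const probs = (model.predict(probe) as tf.Tensor).dataSync();
  console.log('tfjs probabilities:', Array.from(probs));
}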
Here is my Python code for predicting:
model = tf.keras.models.load_model('/content/drive/MyDrive/AI2/sampleModel.h5')
# Show the model architecture
model.summary()
img_height = 224
img_width = 224
# Load the image
img_path = '/content/drive/MyDrive/AI2/RealTest/bmw.jpg' # replace with your image path
img = image.load_img(img_path, target_size=(img_height, img_width))
img_array = image.img_to_array(img)
# Preprocess the image using the same preprocessing function used for ImageNet
img_array = preprocess_input(img_array)
img_array = np.expand_dims(img_array, axis=0)
# Make a prediction
predictions = model.predict(img_array)
predicted_class = np.argmax(predictions[0])
# Print the predicted class index
print('Predicted class:', predicted_class)
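For reference, my understanding is that resnet50.preprocess_input uses 'caffe'-style preprocessing: flip the channels from RGB to BGR and subtract the per-channel ImageNet means, with no division by 255. A tfjs sketch of what I believe the equivalent would look like (toCaffe is my own helper name):

import * as tf from '@tensorflow/tfjs';

// What I believe Keras' resnet50.preprocess_input does ('caffe' mode):
// RGB -> BGR channel flip, subtract the ImageNet means, no /255 scaling.
function toCaffe(rgb: tf.Tensor3D): tf.Tensor {
  const bgr = tf.reverse(rgb.toFloat(), -1);            // RGB -> BGR
  const mean = tf.tensor1d([103.939, 116.779, 123.68]); // per-channel means, BGR order
  return bgr.sub(mean);                                 // note: no division by 255
}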
Finally, here is my React code using TensorFlow.js. The model loads successfully and produces a prediction without errors for every image I give it, but the prediction is wrong every single time:
const { state, dispatch } = useMyContext(); // Use the custom hook to access state and dispatch
const [model, setModel] = React.useState<any>(null);

useEffect(() => {
  async function loadModel() {
    const model = await tf.loadLayersModel('/models/model.json');
    setModel(model);
    console.log('model loaded');
    console.log('model', model);
    model.summary();
  }
  loadModel();
}, []);

useEffect(() => {
  if (!state.imageFiles) return;
  if (state.imageFiles.length === 0) return;
  const newImgFunc = async () => {
    const img = new Image();
    const imageFile = state.imageFiles[0];
    img.src = URL.createObjectURL(imageFile); // createObjectURL is synchronous, no await needed
    img.onload = async () => {
      const class_names = ['Audi_A3_2021_Processed', 'BMW_1-Series_2016_Processed', 'Ford_Puma_2021_Processed', 'Nissan_Juke_2023_Processed', 'Nissan_Qashqai_2021_Processed'];
      // Attempt to match preprocess_input from Keras for ResNet50
      const mean = tf.tensor1d([123.68, 116.779, 103.939]);
      const std = tf.tensor1d([58.393, 57.12, 57.375]); // unused; can be removed
      let tensor = tf.browser.fromPixels(img)
        .resizeBilinear([224, 224]) // Using bilinear, as it's typically more accurate
        .toFloat()
        .sub(mean)            // subtract per-channel mean (RGB order)
        .div(tf.scalar(255)); // then divide by 255
      const prediction = await model.predict(tensor.expandDims());
      const predictions = prediction.dataSync();
      const predictedClassIndex = predictions.indexOf(Math.max(...predictions));
      console.log('Predicted Class:', class_names[predictedClassIndex]);
    };
  };
  newImgFunc();
}, [state]);

const onDrop = useCallback((acceptedFiles: any[]) => {
  acceptedFiles.forEach((file: Blob) => {
    const reader = new FileReader();
    reader.onabort = () => console.log('file reading was aborted');
    reader.onerror = () => console.log('file reading has failed');
    reader.onload = () => {
      const binaryStr = reader.result;
      console.log(binaryStr);
      const droppedFile = new File([binaryStr as BlobPart], 'imageName');
      dispatch({ type: 'SET_IMAGE_FILES', payload: [droppedFile] });
    };
    reader.readAsArrayBuffer(file);
  });
}, []);
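If the 'caffe'-mode sketch above is correct, the drop-in replacement for my tensor pipeline would be roughly this (reusing the hypothetical toCaffe helper from earlier; I have not verified it yet):

// Hypothetical replacement for the .sub(mean).div(255) pipeline above:
// resize first, then apply the caffe-mode transform, then add the batch dim.
const input = toCaffe(
  tf.browser.fromPixels(img).resizeBilinear([224, 224])
).expandDims();
const probs = (model.predict(input) as tf.Tensor).dataSync();

The main differences from my current code would be the BGR channel order and the missing division by 255.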