I’m trying to use FaceNet with Keras and TensorFlow to do facial recognition with MTCNN, but it gives me the error EOFError: EOF read where object expected, and I want to know whether this model can still be used.
I have already tried other scripts that do not need this Keras model, and they worked with the versions of TensorFlow and Keras indicated below.
The versions I have to use are:
TensorFlow version: 2.16.1
Keras version: 3.3.3
Python version: 3.11.9
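If it helps, I believe the error comes from the model-loading step, so here is a minimal sketch that isolates just that call (the .h5 path here is only a placeholder for the same file used in the full script below):

import keras
from keras.models import load_model

print(keras.__version__)  # 3.3.3 on my machine

# Loading the old FaceNet HDF5 model is the step I suspect raises the EOFError
model = load_model("facenet_keras.h5")  # placeholder path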
import os
from os import listdir
from PIL import Image as Img
from numpy import asarray
from numpy import expand_dims
from matplotlib import pyplot
from keras.models import load_model #type: ignore
import numpy as np
import tensorflow as tf
from keras_facenet import FaceNet # type: ignore
import pickle
import cv2
HaarCascade = cv2.CascadeClassifier(cv2.samples.findFile(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'))
MyFaceNet = load_model("F:AndreFaceRecognitiontest/facenet_keras.h5")
folder='faces/'
database = {}
# Build a dictionary of FaceNet embeddings, one entry per image in faces/
for filename in listdir(folder):
    path = folder + filename
    gbr1 = cv2.imread(folder + filename)

    wajah = HaarCascade.detectMultiScale(gbr1, 1.1, 4)

    if len(wajah) > 0:
        x1, y1, width, height = wajah[0]
    else:
        x1, y1, width, height = 1, 1, 10, 10

    x1, y1 = abs(x1), abs(y1)
    x2, y2 = x1 + width, y1 + height

    gbr = cv2.cvtColor(gbr1, cv2.COLOR_BGR2RGB)
    gbr = Img.fromarray(gbr)  # convert from OpenCV to PIL
    gbr_array = asarray(gbr)

    face = gbr_array[y1:y2, x1:x2]
    face = Img.fromarray(face)
    face = face.resize((160, 160))
    face = asarray(face)

    # Standardize the pixel values before feeding the face to FaceNet
    face = face.astype('float32')
    mean, std = face.mean(), face.std()
    face = (face - mean) / std

    face = expand_dims(face, axis=0)
    signature = MyFaceNet.predict(face)

    database[os.path.splitext(filename)[0]] = signature
myfile = open("data.pkl", "wb")
pickle.dump(database, myfile)
myfile.close()
myfile = open("data.pkl", "rb")
database = pickle.load(myfile)
myfile.close()
def findFaces(frame):
    # Detect faces in the frame, embed each one and label it with the
    # closest match from the database
    gbr1 = frame
    gbr = cv2.cvtColor(gbr1, cv2.COLOR_BGR2RGB)
    gbr = Img.fromarray(gbr)  # convert from OpenCV to PIL
    gbr_array = asarray(gbr)

    wajah = HaarCascade.detectMultiScale(gbr1, 1.1, 4)

    for (x1, y1, w, h) in wajah:
        x1, y1 = abs(x1), abs(y1)
        x2, y2 = x1 + w, y1 + h

        face = gbr_array[y1:y2, x1:x2]
        face = Img.fromarray(face)
        face = face.resize((160, 160))
        face = asarray(face)

        face = face.astype('float32')
        mean, std = face.mean(), face.std()
        face = (face - mean) / std

        face = expand_dims(face, axis=0)
        signature = MyFaceNet.predict(face)

        # Nearest-neighbour search over the stored embeddings
        min_dist = 100
        identity = ' '
        for key, value in database.items():
            dist = np.linalg.norm(value - signature)
            if dist < min_dist:
                min_dist = dist
                identity = key

        cv2.putText(gbr1, identity, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1, cv2.LINE_AA)
        cv2.rectangle(gbr1, (x1, y1), (x2, y2), (0, 255, 0), 2)

    return gbr1
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        break

    frame = findFaces(frame)
    cv2.imshow('Live Stream', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
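For reference, keras_facenet (imported at the top but not used yet) seems to bundle its own FaceNet model; this is a rough sketch of how I understand its embeddings() call, which I have not verified on TF 2.16.1 / Keras 3.3.3:

from keras_facenet import FaceNet
import cv2

embedder = FaceNet()  # loads the package's own FaceNet weights

# 'faces/example.jpg' is a placeholder file name
img = cv2.cvtColor(cv2.imread('faces/example.jpg'), cv2.COLOR_BGR2RGB)
crop = cv2.resize(img, (160, 160))  # a face crop, like the ones in the script above

# embeddings() takes a list of RGB face crops and returns one vector per face
signature = embedder.embeddings([crop])
print(signature.shape)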