import tensorflow as tf
from tensorflow.keras.metrics import Precision, Recall

@tf.function
def train_step(batch):
    # Record all of our operations
    with tf.GradientTape() as tape:
        # Get anchor and positive/negative image
        X = batch[:2]
        # Get label
        y = batch[2]

        # Forward pass
        yhat = siamese_model(X, training=True)
        # Calculate loss
        loss = binary_cross_loss(y, yhat)

    # Calculate gradients
    grad = tape.gradient(loss, siamese_model.trainable_variables)
    # Calculate updated weights and apply to siamese model
    opt.apply_gradients(zip(grad, siamese_model.trainable_variables))

    # Return the loss so the training loop can log it
    return loss

def train(data, EPOCHS):
    # Loop through epochs
    for epoch in range(1, EPOCHS + 1):
        print('\n Epoch {}/{}'.format(epoch, EPOCHS))
        progbar = tf.keras.utils.Progbar(len(data))

        # Creating metric objects
        r = Recall()
        p = Precision()

        # Loop through each batch
        for idx, batch in enumerate(data):
            # Run train step here
            loss = train_step(batch)
            yhat = siamese_model.predict(batch[:2])
            r.update_state(batch[2], yhat)
            p.update_state(batch[2], yhat)
            progbar.update(idx + 1)
        print(loss.numpy(), r.result().numpy(), p.result().numpy())

        # Save checkpoints every 10 epochs
        if epoch % 10 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)

EPOCHS = 50
train(train_data, EPOCHS)
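
For context, the snippet relies on a few objects defined earlier in the script (siamese_model is the Keras Siamese network built before this point). Below is a minimal sketch of how the remaining objects are typically set up in this kind of pipeline; the specific loss, optimizer, learning rate, and checkpoint path shown here are assumptions for illustration, not necessarily identical to my actual code:

import os
import tensorflow as tf

# Assumed: standard binary cross-entropy loss for the 0/1 match label
binary_cross_loss = tf.keras.losses.BinaryCrossentropy()

# Assumed: Adam optimizer; the learning rate is a placeholder
opt = tf.keras.optimizers.Adam(1e-4)

# Assumed: checkpoint tracking the optimizer and model,
# saved under a placeholder directory
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')
checkpoint = tf.train.Checkpoint(opt=opt, siamese_model=siamese_model)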
I tried changing the loss function, but it didn't work and I am still having the same problem.