I am running this code and have faced a lot of errors along the way. I managed to solve most of them, but I am stuck on the one below, which shows up while running the epochs.
The Code:
def scheduler(epoch, lr):
    return lr * tf.math.exp(-0.1)
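(As a quick sanity check of the schedule, not part of the training script: each call scales the learning rate by exp(-0.1), i.e. roughly a 9.5% drop per epoch.)

# Quick check of what the schedule does to the learning rate (not part of training):
lr = 0.0001
for epoch in range(3):
    lr = float(scheduler(epoch, lr))
    print(epoch, lr)  # roughly 9.05e-05, 8.19e-05, 7.41e-05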
Calculating the loss with the CTC loss function, since it performs well on image and video sequence tasks:
def CTCLoss(y_true, y_pred):
    batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64")
    input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64")
    label_length = tf.cast(tf.shape(y_true)[1], dtype="int64")
    input_length = input_length * tf.ones(shape=(batch_len, 1), dtype="int64")
    label_length = label_length * tf.ones(shape=(batch_len, 1), dtype="int64")
    loss = tf.keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)
    return loss
Generating example predictions with a callback class, using the num_to_char and char_to_num functions to convert between indices and characters:
class ProduceExample(tf.keras.callbacks.Callback):
    def __init__(self, dataset) -> None:
        self.dataset = dataset.as_numpy_iterator()

    def on_epoch_end(self, epoch, logs=None) -> None:
        data = self.dataset.next()
        yhat = self.model.predict(data[0])
        decoded = tf.keras.backend.ctc_decode(yhat, [75,75], greedy=False)[0][0].numpy()
        for x in range(len(yhat)):
            print('Original:', tf.strings.reduce_join(num_to_char(data[1][x])).numpy().decode('utf-8'))
            print('Prediction:', tf.strings.reduce_join(num_to_char(decoded[x])).numpy().decode('utf-8'))
Compiling the model with the Adam optimizer:
model.compile(optimizer=Adam(learning_rate=0.0001), loss=CTCLoss)
checkpoint_callback = ModelCheckpoint(os.path.join('models','checkpoint'), monitor='loss', save_weights_only=True)
schedule_callback = LearningRateScheduler(scheduler)
example_callback = ProduceExample(test)
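For completeness, the snippet above assumes imports along these lines (the exact import paths can vary a bit between TF/Keras versions):

import os
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler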
This line throws a Graph Execution Error when I run it. At the moment I am only running 10 epochs to test, but it still fails:
model.fit(train, validation_data=test, epochs=700, callbacks=[checkpoint_callback, schedule_callback, example_callback])
I then tried this version of the code:

def scheduler(epoch, lr):
    if epoch < 30:
        return lr
    else:
        return lr * tf.math.exp(-0.1)
The CTC loss function is the same as above:
def CTCLoss(y_true, y_pred):
    batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64")
    input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64")
    label_length = tf.cast(tf.shape(y_true)[1], dtype="int64")
    input_length = input_length * tf.ones(shape=(batch_len, 1), dtype="int64")
    label_length = label_length * tf.ones(shape=(batch_len, 1), dtype="int64")
    loss = tf.keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)
    return loss
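As far as I understand it, ctc_batch_cost wants y_pred as per-frame class scores of shape (batch, time_steps, num_classes), where the last class is the CTC blank, and y_true as padded label indices of shape (batch, max_label_len), with the two length tensors shaped (batch, 1). A dummy-tensor check of the loss function (placeholder sizes, not my real data):

# Dummy-tensor check of CTCLoss (placeholder sizes, not my real data):
batch, time_steps, num_classes = 2, 75, 41
dummy_pred = tf.nn.softmax(tf.random.uniform((batch, time_steps, num_classes)))      # per-frame scores
dummy_true = tf.random.uniform((batch, 30), maxval=num_classes - 1, dtype=tf.int64)  # padded labels
print(CTCLoss(dummy_true, dummy_pred).shape)  # expect (2, 1)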
The prediction callback is the same, with a separator line printed after each example:
class ProduceExample(tf.keras.callbacks.Callback):
    def __init__(self, dataset) -> None:
        self.dataset = dataset.as_numpy_iterator()

    def on_epoch_end(self, epoch, logs=None) -> None:
        data = self.dataset.next()
        yhat = self.model.predict(data[0])
        decoded = tf.keras.backend.ctc_decode(yhat, [75,75], greedy=False)[0][0].numpy()
        for x in range(len(yhat)):
            print('Original:', tf.strings.reduce_join(num_to_char(data[1][x])).numpy().decode('utf-8'))
            print('Prediction:', tf.strings.reduce_join(num_to_char(decoded[x])).numpy().decode('utf-8'))
            print('~'*100)
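num_to_char and char_to_num come from earlier in the notebook; they are StringLookup layers along these lines (the vocab below is a placeholder, not my exact character set):

# Roughly how the lookups are set up earlier in the notebook (placeholder vocab):
vocab = [x for x in "abcdefghijklmnopqrstuvwxyz'?!123456789 "]
char_to_num = tf.keras.layers.StringLookup(vocabulary=vocab, oov_token="")
num_to_char = tf.keras.layers.StringLookup(vocabulary=char_to_num.get_vocabulary(), oov_token="", invert=True)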
Compiling with the Adam optimizer as before:
model.compile(optimizer=Adam(learning_rate=0.0001), loss=CTCLoss)
checkpoint_callback = ModelCheckpoint(os.path.join('models','checkpoint'), monitor='loss', save_weights_only=True)
schedule_callback = LearningRateScheduler(scheduler)
example_callback = ProduceExample(test)
Again, this is the line that throws the Graph Execution Error (still only 10 epochs, to test):
model.fit(train, validation_data=test, epochs=700, callbacks=[checkpoint_callback, schedule_callback, example_callback])
Running it, I got this error:
Epoch 1/10
WARNING:tensorflow:From e:\Pyhton 3.12\Lib\site-packages\keras\src\legacy\backend.py:666: The name tf.nn.ctc_loss is deprecated. Please use tf.compat.v1.nn.ctc_loss instead.
InvalidArgumentError Traceback (most recent call last)
Cell In[108], line 1
----> 1 model.fit(train, validation_data=test, epochs=10, callbacks=[checkpoint_callback, schedule_callback, example_callback])
…
File "e:\Pyhton 3.12\Lib\site-packages\keras\src\backend\tensorflow\numpy.py", line 1618, in reshape
Only one input size may be -1, not both 0 and 1
[[{{node sequential_11_1/time_distributed_9_1/Reshape_72}}]] [Op:__inference_one_step_on_iterator_49320]
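In case it is useful for diagnosing the reshape error, this is the kind of shape check I can run (train and model are the same objects as above; the model's output time dimension is what should line up with the 75 hard-coded in ctc_decode and with input_length in CTCLoss):

# Inspect one real batch and the model output shapes (same train/model as above):
frames, labels = next(iter(train))
print('frames:', frames.shape)         # batch of input sequences
print('labels:', labels.shape)         # batch of padded labels
print('output:', model(frames).shape)  # time dimension should match the 75 used in ctc_decode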