Hello, I am using a Google Colab GPU. I need to save the model as often as I can and then resume training in a new Colab session. My code is below, but after reloading I get lower accuracy and higher loss. For example, at epoch 89 I have:

acc: 0.9990301728, loss: 0.002603143221
acc_val: 0.9557291865, loss_val: 0.2962754667

and at epoch 90, the first epoch run in the new Colab session, I have:
acc: 0.9803879261, loss: 0.1103143221
acc_val: 0.939127624, loss_val: 0.1836656481
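For reference, a minimal sketch of one way to save the whole model to Drive at the end of a session and load it back at the start of the next one (the file name is just an example; the folder is the checkpoint_dir used in the code below):

# End of the first session: save the full model, including the optimizer state.
model.save('/content/drive/MyDrive/Project1/Weights/10s/model_fold_1.h5')

# Start of the new session, after mounting Drive: load the model back instead of
# rebuilding and recompiling it from scratch.
from tensorflow.keras.models import load_model
model = load_model('/content/drive/MyDrive/Project1/Weights/10s/model_fold_1.h5')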
Here is my training code:

import numpy as np
from sklearn.model_selection import KFold, train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, BatchNormalization, MaxPool1D, Dropout, Flatten, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.callbacks import ReduceLROnPlateau, CSVLogger, BackupAndRestore

# Define 5-fold cross-validation
kf = KFold(n_splits=5, shuffle=True, random_state=42)
conf_matrix_all_CNN_10s = []
fpr_matrix_all_CNN_10s = []
tpr_matrix_all_CNN_10s = []
for i, (train_val_index, test_index) in enumerate(kf.split(all_file_paths)):
print(f"Fold {i+1}:")
# Split data into train and val sets for this fold
train_val_files = [all_file_paths[j] for j in train_val_index]
test_files = [all_file_paths[j] for j in test_index]
train_files, val_files = train_test_split(train_val_files, test_size=0.25, random_state=42)
# Create train and test data generators
train_generator = data_generator_train(train_files, batch_size)
val_generator = data_generator_val(val_files, batch_size)
test_generator = data_generator_test(test_files, batch_size)
# Define model architecture
model = Sequential()
input_shape = np.load(epilepsy_file_paths[0]).T.shape
model.add(Conv1D(64, 3, strides=1, input_shape=input_shape, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool1D(pool_size=(2)))
model.add(Dropout(0.5))
model.add(Conv1D(48, 3, strides=1, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool1D(pool_size=(2)))
model.add(Dropout(0.5))
model.add(Conv1D(32, 3, strides=1, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool1D(pool_size=(2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
modelOptimizer = Adam(learning_rate=0.001)
model.compile(optimizer=modelOptimizer, loss=BinaryCrossentropy(), metrics=['accuracy'])
reduceLR_callback = ReduceLROnPlateau(
monitor="val_loss",
factor=0.5,
patience=7,
mode="min",
min_lr=1e-5,
)
checkpoint_dir = '/content/drive/MyDrive/Project1/Weights/10s'
# Define CSV logger callback
csv_logger = CSVLogger(f'{checkpoint_dir}/training_log_fold_{i + 1}_CNN_10s.csv', append=True)
backup_restore_callback = BackupAndRestore(backup_dir=f'{checkpoint_dir}/backup_fold_{i + 1}')
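    # Hypothetical addition, not in the original code: a ModelCheckpoint that writes the
    # full model (weights plus optimizer state) to Drive at the end of every epoch, so any
    # saved epoch can later be reloaded with load_model in a new Colab session. To use it,
    # append checkpoint_callback to the callbacks list passed to model.fit below.
    from tensorflow.keras.callbacks import ModelCheckpoint
    checkpoint_callback = ModelCheckpoint(
        filepath=f'{checkpoint_dir}/model_fold_{i + 1}_epoch_{{epoch:02d}}.h5',
        save_weights_only=False,
        save_freq='epoch',
    )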
    # Train the model with training data
    history = model.fit(
        train_generator,
        epochs=epochs,
        steps_per_epoch=len(train_files) // batch_size,
        validation_data=val_generator,
        validation_steps=len(val_files) // batch_size,
        callbacks=[reduceLR_callback, csv_logger, backup_restore_callback],
        shuffle=False,
    )
I expect to get nearly the same accuracy and loss after reloading the model.
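If the full model is saved as sketched above, a minimal sketch of resuming training in the new session could look like this (initial_epoch=89 is an assumption based on the epoch numbers quoted above; it keeps the epoch numbering and the CSV log consistent across sessions):

# The reloaded model still carries its compiled optimizer state, so training
# continues from where it stopped instead of from a freshly initialized Adam.
history = model.fit(
    train_generator,
    epochs=epochs,
    initial_epoch=89,  # continue counting from the last completed epoch
    steps_per_epoch=len(train_files) // batch_size,
    validation_data=val_generator,
    validation_steps=len(val_files) // batch_size,
    callbacks=[reduceLR_callback, csv_logger, backup_restore_callback],
)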