I’m working on a brain tumor segmentation task using the BraTS 2019 dataset, and I run into a problem whenever I train for more than one epoch with the fit method.
As per my analysis, Keras should call the DataGenerator’s on_epoch_end method internally at the end of each epoch, resetting (and reshuffling) the indices so the next epoch draws valid batches. Instead, the model crashes with an AttributeError as soon as the second epoch starts, as if the indices were never rebuilt.
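My mental model of the epoch cycle, driven by hand (a minimal sketch using the DataGenerator defined below, outside of fit):

gen = DataGenerator(Train, batch_size=2)
for epoch in range(2):
    for i in range(len(gen)):
        X, y = gen[i]   # __getitem__ slices self.indexes batch by batch
    gen.on_epoch_end()  # what Keras should call at each epoch boundary to rebuild the indices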
import numpy as np
import nibabel as nib
import tensorflow as tf
from tensorflow import keras
from scipy.ndimage import rotate, zoom, shift
from numpy.random import randint, uniform
from skimage.transform import resize
class DataGenerator(keras.utils.Sequence):
    def __init__(self, list_IDs, batch_size=2, dim=(128, 128, 128), shuffle=True, channels=3, num_class=4, **kwargs):
        super().__init__(**kwargs)  # Call super constructor
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.shuffle = shuffle
        self.channels = channels
        self.num_class = num_class
        self.on_epoch_end()  # build the initial index array
    def __len__(self):
        # Number of full batches per epoch; remainder samples are dropped
        return len(self.list_IDs) // self.batch_size
    def __getitem__(self, index):
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        list_IDs_temp = [self.list_IDs[k] for k in indexes]
        X, y = self.__data_generation(list_IDs_temp)
        return X, y
    def on_epoch_end(self):
        # Rebuild (and optionally reshuffle) the index array for the next epoch
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
            np.random.shuffle(self.indexes)
    def __data_generation(self, list_IDs_temp):
        X = np.zeros((self.batch_size, *self.dim, self.channels), dtype=np.float32)
        y = np.zeros((self.batch_size, *self.dim, self.num_class), dtype=np.float32)
        for i, img in enumerate(list_IDs_temp):
            t1_path, t2_path, t1ce_path, flair_path, mask_path = img
            image_t1 = nib.load(t1_path).get_fdata().astype(np.float32)  # loaded but not used in the stack below
            image_t2 = nib.load(t2_path).get_fdata().astype(np.float32)
            image_t1ce = nib.load(t1ce_path).get_fdata().astype(np.float32)
            image_flair = nib.load(flair_path).get_fdata().astype(np.float32)
            # Stack three modalities as channels, then crop to 128x128x128
            x = np.stack([image_t1ce, image_t2, image_flair], axis=3)
            x = x[56:184, 56:184, 13:141]
            mask = nib.load(mask_path).get_fdata().astype(np.uint8)
            mask[mask == 4] = 3  # remap BraTS label 4 to 3 so the classes are contiguous
            mask = keras.utils.to_categorical(mask, self.num_class)
            mask = mask[56:184, 56:184, 13:141]
            # Apply data augmentation with probability 10%
            if np.random.rand() < 0.10:
                x, mask = self.apply_augmentation(x, mask)
            # Resize to target dimensions (nearest-neighbour for the one-hot mask)
            x = resize(x, self.dim, anti_aliasing=True)
            mask = resize(mask, self.dim, order=0, anti_aliasing=False)
            # Normalize input data
            X[i] = x / 255.0
            y[i] = mask  # the mask is already one-hot; dividing it by 255 would destroy the labels
        return X, y
    # Apply the same random rotation/zoom/shift to the image and mask
    def apply_augmentation(self, x, mask):
        # Randomly choose augmentation parameters
        angle = uniform(-10, 10)           # rotation angle in degrees
        zoom_factor = uniform(0.95, 1.05)  # isotropic zoom factor
        # Apply rotation; order=0 keeps the mask labels crisp
        x_augmented = rotate(x, angle, reshape=False, order=3)
        mask_augmented = rotate(mask, angle, reshape=False, order=0)
        # Zoom the three spatial axes only, leaving the channel axis alone
        x_augmented = zoom(x_augmented, (zoom_factor,) * 3 + (1,), order=3)
        mask_augmented = zoom(mask_augmented, (zoom_factor,) * 3 + (1,), order=0)
        # Shift the spatial axes by up to 2 voxels; randint needs integer bounds
        shift_factor = tuple(randint(-2, 3) for _ in range(3)) + (0,)
        x_augmented = shift(x_augmented, shift_factor, order=3)
        mask_augmented = shift(mask_augmented, shift_factor, order=0)
        # Resize augmented data back to the original spatial shape (channels preserved)
        x_augmented = resize(x_augmented, x.shape[:-1], order=3, anti_aliasing=True)
        mask_augmented = resize(mask_augmented, mask.shape[:-1], order=0, anti_aliasing=False)
        return x_augmented, mask_augmented
Training_datagen = DataGenerator(Train, batch_size=2)
Validation_datagen = DataGenerator(Validation, batch_size=2)
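As a sanity check, pulling a single batch directly from the generator should yield shapes (2, 128, 128, 128, 3) for the images and (2, 128, 128, 128, 4) for the masks, given dim=(128, 128, 128), channels=3, and num_class=4:

Xb, yb = Training_datagen[0]
print(Xb.shape, yb.shape)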
# dice_loss, CustomMetrics(), and the sm (segmentation models) import are not shown here
loss = dice_loss
metrics = [CustomMetrics()]
batch_size = 2
learning_rate = 0.0001
epochs = 30
optim = tf.keras.optimizers.Adam(learning_rate=learning_rate)
train_steps_per_epoch = len(Train)//batch_size
val_steps_per_epoch = len(Validation)//batch_size
model = sm.Unet(backbone_name='vgg16', input_shape=(128, 128, 128, 3), classes=4, activation='softmax', encoder_weights='imagenet')
model.compile(optimizer=optim, loss=loss, metrics=metrics)
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger
# Define callbacks
checkpoint = ModelCheckpoint(filepath='model_weights.keras', monitor='val_loss', mode='min', verbose=1, save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10, restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=1e-6)
csv_logger = CSVLogger(filename='training.log')
history = model.fit(
    Training_datagen,
    epochs=epochs,
    validation_data=Validation_datagen,
    verbose=1,
    steps_per_epoch=train_steps_per_epoch,
    validation_steps=val_steps_per_epoch,
    callbacks=[checkpoint, early_stopping, reduce_lr, csv_logger],
)
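One thing I’m unsure about: since DataGenerator is a keras.utils.Sequence, its __len__ already tells fit how many batches make up an epoch, so steps_per_epoch and validation_steps should be redundant here. Would dropping them, as sketched below, also change how the generator is reset between epochs?

history = model.fit(
    Training_datagen,
    epochs=epochs,
    validation_data=Validation_datagen,
    verbose=1,
    callbacks=[checkpoint, early_stopping, reduce_lr, csv_logger],
)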
If anyone has experience with segmentation tasks and custom data generators, I’d really appreciate help figuring out why this breaks after the first epoch, so training runs smoothly across all epochs.