I’m trying to fine-tune GPT-2 on my own dataset. Training goes smoothly until it reaches the evaluation step (every 500 steps), where it throws this error:
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 7.72 GiB. GPU 0 has a total capacity of 24.00 GiB of which 13.38 GiB is free. Process 13650 has 17179869184.00 GiB memory in use. Process 48439 has 17179869184.00 GiB memory in use. Process 21062 has 17179869184.00 GiB memory in use. Process 40988 has 17179869184.00 GiB memory in use. Including non-PyTorch memory, this process has 17179869184.00 GiB memory in use. Of the allocated memory 9.14 GiB is allocated by PyTorch, and 187.59 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
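As the error message suggests, I’m already enabling expandable segments before anything touches the GPU; near the top of the script below I have:

import os
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'  # set before the first CUDA allocation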
I have no clue why this is happening, and I’ve tried everything I could think of.
I’m using a training dataset of 10k lines and a validation dataset of 1,000 lines.
My code (I’m running it in WSL):
import torch.utils
import torch.utils.data
from transformers import (
    GPT2LMHeadModel, GPT2Tokenizer, TextDataset, DataCollatorForLanguageModeling,
    Trainer, TrainingArguments, EarlyStoppingCallback, GPT2Config,
    get_linear_schedule_with_warmup, TrainerCallback)
import torch, GPUtil, os
import numpy as np
from sklearn.metrics import accuracy_score
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
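# (As far as I can tell, PYTORCH_CUDA_ALLOC_CONF is only read when the CUDA caching
# allocator initialises on the first GPU allocation, so setting it here, before the
# model is moved to the device, should still take effect even though torch is imported above.)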
BATCH_SIZE = 1
EPOCHS = 100
OUTPUT_DIR = "Models/"
SAVE_STEPS = 1000
EVAL_STEPS = 500
ACCUMULATION_STEPS = 2
EARLY_STOPPING_PATIENCE = 3
DROPOUT = 0.25
MODEL_NAME = f"gpt2-bs{BATCH_SIZE}-ep{EPOCHS}-ss{SAVE_STEPS}-es{EVAL_STEPS}-esp{EARLY_STOPPING_PATIENCE}-drpt-{DROPOUT}"
DATASET_PATH = "trainingData.txt"
VAL_DATASET_PATH = "validationData.txt"
BLOCK_SIZE = 128
# Check if a GPU is available and set the device
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Training on device: {}".format(device))
# Load the pre-trained GPT-2 config, tokenizer and model
model_name = 'gpt2'  # Change this to a larger model
# GPT2Config has no generic "dropout" field; set the three dropout probabilities explicitly
config = GPT2Config.from_pretrained(model_name, resid_pdrop=DROPOUT, embd_pdrop=DROPOUT, attn_pdrop=DROPOUT)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
print("Loaded tokenizer and config from {}".format(model_name))
print("Transferring model to device: {}".format(device))
# Load the model once, with the custom config, and move it to the GPU
model = GPT2LMHeadModel.from_pretrained(model_name, config=config).to(device)
print("Transfer Complete")
# Load the text messages dataset
dataset_path = DATASET_PATH # Replace with the path to your file
dataset = TextDataset(tokenizer=tokenizer, file_path=dataset_path, block_size=BLOCK_SIZE)
train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
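# Note: this DataLoader is not passed to the Trainer (which builds its own dataloaders
# from train_dataset); it is only used below to work out total_steps for the scheduler.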
print("Created dataset from {}".format(dataset_path))
# Load the validation dataset
val_dataset_path = "validationData.txt" # Replace with the path to your file
eval_dataset = TextDataset(tokenizer=tokenizer, file_path=val_dataset_path, block_size=BLOCK_SIZE)
print("Created evaluation dataset from {}".format(val_dataset_path))
# Prepare the data collator for language modeling
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
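# With mlm=False the collator prepares labels for causal language modelling:
# labels are a copy of the input ids (padding set to -100) and GPT-2 shifts them internally.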
# Define the optimizer
optimizer = torch.optim.AdamW(model.parameters(), lr=0.1)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)
# Calculate the total number of training steps
total_steps = len(train_dataloader) * EPOCHS
# Define the training arguments
training_args = TrainingArguments(
    output_dir=OUTPUT_DIR,
    overwrite_output_dir=True,
    num_train_epochs=EPOCHS,
    per_device_train_batch_size=BATCH_SIZE,
    per_device_eval_batch_size=1,
    save_steps=SAVE_STEPS,
    save_total_limit=2,
    evaluation_strategy="steps",
    load_best_model_at_end=True,
    metric_for_best_model="loss",
    weight_decay=0.01,
    learning_rate=1e-6,  # Tune the learning rate
    # gradient_accumulation_steps=ACCUMULATION_STEPS,
    eval_steps=EVAL_STEPS,
    fp16=True,
    # no_cuda=True,
)
# Create the Trainer instance and start training
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=0, num_training_steps=total_steps
)
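# Note: because (optimizer, scheduler) is passed to the Trainer below, the learning_rate
# and weight_decay in TrainingArguments are not used to build the optimizer;
# the AdamW defined above (lr=0.1) is what actually runs.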
def compute_metrics(eval_pred):
    # eval_pred holds numpy arrays: logits of shape (n_samples, seq_len, vocab_size) and the labels
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=-1)
    # align predictions with the next-token labels and drop ignored positions (-100);
    # the Trainer already reports eval_loss itself, so only accuracy is added here
    predictions, labels = predictions[:, :-1].reshape(-1), labels[:, 1:].reshape(-1)
    mask = labels != -100
    return {"accuracy": accuracy_score(labels[mask], predictions[mask])}
class GPUMonitorCallback(TrainerCallback):
    def on_step_end(self, args, state, control, **kwargs):
        gpu = GPUtil.getGPUs()[0]  # Assuming you have 1 GPU
        print(f"GPU Utilization: {gpu.load*100}%, Memory Usage: {gpu.memoryUsed}, Free Memory: {gpu.memoryFree}, GPU Temperature: {gpu.temperature}°C")
trainer = Trainer(
    model=model,
    args=training_args,
    data_collator=data_collator,
    train_dataset=dataset,
    eval_dataset=eval_dataset,
    # callbacks=[GPUMonitorCallback()],
    callbacks=[EarlyStoppingCallback(early_stopping_patience=EARLY_STOPPING_PATIENCE), GPUMonitorCallback()],
    optimizers=(optimizer, scheduler),
    compute_metrics=compute_metrics,
)
print("Created data collator and trainer. Starting training...")
trainer.train()
print("Training completed. Saving the model...")
# Save the trained model and tokenizer
model.save_pretrained(MODEL_NAME)
tokenizer.save_pretrained(MODEL_NAME)
print("Model saved to {}".format(MODEL_NAME))