I am attempting to fine-tune the Llama-3.2-1B model from Hugging Face on Colab using an A100 GPU, following this guide.
My understanding is that the A100 GPU supports bf16, but when attempting to fine-tune, the following error is thrown (from the model.generate call shown in the traceback below):
RuntimeError Traceback (most recent call last)
<ipython-input-23-e92ea0b437bd> in <cell line: 9>()
8 # Generate output (inference)
9 with torch.no_grad(): # Disable gradient calculation for inference
---> 10 output = model.generate(
11 inputs["input_ids"],
12 num_return_sequences=1, # Only generate one output
8 frames
/usr/local/lib/python3.10/dist-packages/torch/nn/modules/linear.py in forward(self, input)
123
124 def forward(self, input: Tensor) -> Tensor:
--> 125 return F.linear(input, self.weight, self.bias)
126
127 def extra_repr(self) -> str:
RuntimeError: expected scalar type BFloat16 but found Float
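For reference, a minimal check that the runtime actually reports bf16 support (just a sanity check, assuming a standard Colab PyTorch install):

import torch
print(torch.cuda.get_device_name(0))   # should report an A100
print(torch.cuda.is_bf16_supported())  # True on Ampere GPUs such as the A100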
My code follows the article, with some tweaks:
import os
import sys
import torch
from datasets import load_dataset
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
BitsAndBytesConfig,
HfArgumentParser,
TrainingArguments,
pipeline,
logging,
)
from peft import LoraConfig, PeftModel
from trl import SFTTrainer
from datasets import DatasetDict
torch.cuda.empty_cache()
def create_llama_prompt(system_prompt: str, user_message: str, assistant_message: str = None) -> str:
    prompt = f"""
<|begin_of_text|><|start_header_id|>system<|end_header_id|>
{system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>
{user_message}<|eot_id|><|start_header_id|>assistant<|end_header_id|>
"""
    if assistant_message:
        prompt += f"{assistant_message}<|eot_id|>"
    return prompt
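# Quick sanity check of the prompt builder (debugging aid, not part of the pipeline):
# with an assistant message -> training example; without -> inference prompt
# print(create_llama_prompt("You are a summarizer.", "Some article text.", "A short summary."))
# print(create_llama_prompt("You are a summarizer.", "Some article text."))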
model_name = "meta-llama/Llama-3.2-1B"
dataset_name = "therapara/summary-of-news-articles_new"
new_model = "./models/llama-3.2-1B-summarisation"
lora_r = 64 #lora attention dimension/ rank
lora_alpha = 16 #lora scaling parameter
lora_dropout = 0.1 #lora dropout probability
use_4bit = True
bnb_4bit_compute_dtype = "float16"
bnb_4bit_quant_type = "nf4"
use_nested_quant = False
output_dir = "./results"
num_train_epochs = 3
#enable fp16/bf16 training (set bf16 to True when using A100 GPU in google colab)
fp16 = False
bf16 = True
# remaining training hyperparameters (per_device_train_batch_size, gradient_accumulation_steps,
# optim, learning_rate, max_seq_length, etc.) are defined here as in the article; values omitted for brevity
device_map = {"":0}
dataset = load_dataset(dataset_name)
#load tokenizer and model with QLoRA config
compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
bnb_config = BitsAndBytesConfig(
load_in_4bit = use_4bit,
bnb_4bit_quant_type = bnb_4bit_quant_type,
bnb_4bit_compute_dtype = compute_dtype,
bnb_4bit_use_double_quant = use_nested_quant,)
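# Note: bnb_4bit_compute_dtype is "float16" above while bf16 = True is passed to
# TrainingArguments below. An (untested) variant that keeps the 4-bit compute dtype
# consistent with bf16 training would be:
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=use_4bit,
#     bnb_4bit_quant_type=bnb_4bit_quant_type,
#     bnb_4bit_compute_dtype=torch.bfloat16,
#     bnb_4bit_use_double_quant=use_nested_quant,
# )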
#load base model
model = AutoModelForCausalLM.from_pretrained(
model_name,
quantization_config = bnb_config,
device_map = device_map,
)
model.config.use_cache = False
model.config.pretraining_tp = 1
#Load LLama tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name,trust_remote_code = True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"
sys_prompt = """You are an expert in summarizing news articles. Your task is to generate clear, concise,
and accurate summaries of provided news articles. Only summarize the content that is presented and do not
add additional information. If the article lacks context or cannot be summarized effectively, state this
clearly. Maintain neutrality and focus on the core facts. Your summaries should be succinct and avoid unnecessary details.
"""
def preprocess_function(examples):
    formatted_texts = []
    for article, summarisation in zip(examples["article"], examples["highlights"]):
        formatted_text = create_llama_prompt(system_prompt=sys_prompt, user_message=article, assistant_message=summarisation)
        formatted_texts.append(formatted_text)
    return {"formatted_text": formatted_texts}
# Apply preprocessing to the dataset
formatted_dataset = dataset.map(preprocess_function, batched=True, num_proc=8)
def tokenize_function(examples):
    return tokenizer(examples["formatted_text"], truncation=True, padding="max_length", max_length=max_seq_length)
# tokenized_dataset = formatted_dataset.map(tokenize_function, batched=True, num_proc=8, remove_columns=["article", "highlights", "id"])
tokenized_dataset = formatted_dataset.map(tokenize_function, batched=True, num_proc=8)
print(tokenized_dataset)
#Load QLoRA config
peft_config = LoraConfig(
lora_alpha = lora_alpha,
lora_dropout = lora_dropout,
r = lora_r,
bias = "none",
task_type = "CAUSAL_LM",
)
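# No target_modules are specified, so peft falls back to its default mapping for
# Llama-style models (the q_proj/v_proj attention layers). For illustration only
# (not what I ran), the projection layers could also be listed explicitly:
# peft_config = LoraConfig(
#     lora_alpha=lora_alpha, lora_dropout=lora_dropout, r=lora_r, bias="none",
#     task_type="CAUSAL_LM",
#     target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
# )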
#Set Training parameters
training_arguments = TrainingArguments(
output_dir = output_dir,
num_train_epochs = num_train_epochs,
per_device_train_batch_size = per_device_train_batch_size,
gradient_accumulation_steps = gradient_accumulation_steps,
optim = optim,
save_steps = save_steps,
logging_steps = logging_steps,
learning_rate = learning_rate,
fp16 = fp16,
bf16 = bf16,
max_grad_norm = max_grad_norm,
weight_decay = weight_decay,
lr_scheduler_type = lr_scheduler_type,
warmup_ratio = warmup_ratio,
group_by_length = group_by_length,
max_steps = max_steps,
report_to = "tensorboard",
)
print("Set Training Args")
#SFT Trainer
trainer = SFTTrainer(
model = model,
train_dataset = tokenized_dataset["train"],
eval_dataset = tokenized_dataset["validation"],
peft_config = peft_config,
# dataset_text_field = "formatted_text",
# max_seq_length = max_seq_length,
args = training_arguments,
tokenizer = tokenizer,
# packing = packing,
)
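# (dataset_text_field / max_seq_length are commented out above because newer trl
# versions moved them into SFTConfig; depending on the installed trl version they
# may need to be passed through an SFTConfig rather than TrainingArguments.)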
print("Create SFTTrainer")
print("Starting Training")
with torch.autocast("cuda"):
    trainer.train()
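# Note on the autocast wrapper above: torch.autocast("cuda") defaults to float16,
# while bf16 = True already enables bf16 mixed precision inside the Trainer.
# An (untested) variant drops the explicit autocast context and just calls:
# trainer.train()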
print("Training Ended")
#save trained model
trainer.model.save_pretrained(new_model)
print("Saved pretrained model")
# Ignore warnings
logging.set_verbosity(logging.CRITICAL)
# Run text generation pipeline with our next model
instance_0 = tokenized_dataset["test"][0]
prompt = create_llama_prompt(sys_prompt, instance_0["article"])
print(prompt)
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_new_tokens=500)
result = pipe(prompt)
print(result[0]['generated_text'])
print("Merging model with LoRA weights")
# Reload model in FP16 and merge it with LoRA weights
base_model = AutoModelForCausalLM.from_pretrained(
model_name,
low_cpu_mem_usage=True,
return_dict=True,
torch_dtype=torch.float16,
device_map=device_map,
)
model = PeftModel.from_pretrained(base_model, new_model)
model = model.merge_and_unload()
# Reload tokenizer to save it
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"
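For completeness, the cell that actually raises the RuntimeError is the generation cell shown in the traceback (not reproduced above); it looks roughly like this, where the tokenization and decoding lines are my reconstruction and may differ slightly from what I ran:

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
# Generate output (inference)
with torch.no_grad():  # Disable gradient calculation for inference
    output = model.generate(
        inputs["input_ids"],
        num_return_sequences=1,  # Only generate one output
    )
print(tokenizer.decode(output[0], skip_special_tokens=True))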
To address this, I have attempted to load the models in bf16 with:
model = AutoModelForCausalLM.from_pretrained(
model_name,
quantization_config = bnb_config,
torch_dtype = torch.bfloat16,
device_map = device_map,
)
...
base_model = AutoModelForCausalLM.from_pretrained(
model_name,
low_cpu_mem_usage=True,
return_dict=True,
torch_dtype=torch.bfloat16,
device_map=device_map,
)
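To narrow down which side of the failing F.linear call ends up in the wrong dtype, a quick diagnostic along these lines can be run after loading (a sketch, not something from the guide):

# Inspect parameter dtypes and the dtype of the hidden states entering the model
for name, param in list(model.named_parameters())[:5]:
    print(name, param.dtype)
sample = tokenizer("test", return_tensors="pt").to(model.device)
with torch.no_grad():
    print(model.get_input_embeddings()(sample["input_ids"]).dtype)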