I am new to fine-tuning LLMs and have been trying to run the notebooks provided by Unsloth AI. For this question, I am running the code for fine-tuning the LLaMA 3.1 8B model, as posted here.
The Colab notebook uses a Hugging Face dataset on which the LLM is fine-tuned:
alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{}
### Input:
{}
### Response:
{}"""
EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN
def formatting_prompts_func(examples):
    instructions = examples["instruction"]
    inputs = examples["input"]
    outputs = examples["output"]
    texts = []
    for instruction, input, output in zip(instructions, inputs, outputs):
        # Must add EOS_TOKEN, otherwise your generation will go on forever!
        text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN
        texts.append(text)
    return { "text" : texts, }
pass
from datasets import load_dataset
dataset = load_dataset("yahma/alpaca-cleaned", split = "train")
dataset = dataset.map(formatting_prompts_func, batched = True,)
I want to use my own data, so I have introduced the following small change:
from transformers import T5ForConditionalGeneration, T5Tokenizer
from datasets import Dataset
import nltk
nltk.download('punkt', quiet=True)
def load_model():
    model_name = "mrm8488/t5-base-finetuned-question-generation-ap"
    tokenizer = T5Tokenizer.from_pretrained(model_name)
    model = T5ForConditionalGeneration.from_pretrained(model_name)
    return tokenizer, model

def generate_question(sentence, tokenizer, model):
    input_text = "generate question: " + sentence
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
    outputs = model.generate(input_ids, max_length=64, num_return_sequences=1)
    question = tokenizer.decode(outputs[0], skip_special_tokens=True)
    question = question.replace("question: ", "", 1).strip()
    question = question[0].upper() + question[1:]
    return question

def process_paragraph(paragraph, tokenizer, model):
    sentences = nltk.sent_tokenize(paragraph)
    questions = [generate_question(sentence, tokenizer, model) for sentence in sentences]
    return sentences, questions

def create_dataset(sentences, questions):
    return Dataset.from_dict({
        'output': sentences,
        'input': [''] * len(sentences),
        'instruction': questions
    })
# Load the model
tokenizer, model = load_model()
# Example usage
paragraph = """Whales are magnificent marine mammals. These colossal creatures inhabit oceans worldwide.
Whales are divided into two main groups: baleen whales and toothed whales.
Baleen whales have a filtering system in their mouths.
Toothed whales, including dolphins and porpoises, have teeth for catching prey.
Whales are known for their intelligence and complex social structures.
These giants of the sea vary greatly in size.
The blue whale is the largest animal ever known to have existed.
Conservation efforts are crucial for protecting whale populations."""
sentences, questions = process_paragraph(paragraph, tokenizer, model)
# Create the dataset
dataset_whales = create_dataset(sentences, questions)
# Print information about the dataset
print(dataset_whales)
print("nFirst few examples:")
print(dataset_whales[:3])
dataset_whales = dataset_whales.map(formatting_prompts_func, batched = True,)
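For reference, a quick way to sanity-check the mapped dataset before training (just a sketch; the column names come from create_dataset and formatting_prompts_func above):

# Sanity check (sketch): the mapped dataset should expose the same columns as the
# original notebook's dataset, plus the "text" column added by the map above.
print(dataset_whales.column_names)  # expected: ['output', 'input', 'instruction', 'text']
print(dataset_whales[0]["text"])    # one fully formatted prompt ending with the EOS token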
The model was able to train properly, but when I run the following inference code
# alpaca_prompt = Copied from above
FastLanguageModel.for_inference(model) # Enable native 2x faster inference
inputs = tokenizer(
[
    alpaca_prompt.format(
        "Continue the fibonnaci sequence.", # instruction
        "1, 1, 2, 3, 5, 8", # input
        "", # output - leave this blank for generation!
    )
], return_tensors = "pt").to("cuda")
outputs = model.generate(**inputs, max_new_tokens = 64, use_cache = True)
tokenizer.batch_decode(outputs)
I get the following error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-37-64c50bf8aa34> in <cell line: 12>()
10 ], return_tensors = "pt").to("cuda")
11
---> 12 outputs = model.generate(**inputs, max_new_tokens = 64, use_cache = True)
13 tokenizer.batch_decode(outputs)
2 frames
/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py in decorate_context(*args, **kwargs)
113 def decorate_context(*args, **kwargs):
114 with ctx_factory():
--> 115 return func(*args, **kwargs)
116
117 return decorate_context
/usr/local/lib/python3.10/dist-packages/unsloth/models/llama.py in _fast_generate(*args, **kwargs)
1191
1192 # Autocasted
-> 1193 with torch.autocast(device_type = device_type, dtype = dtype):
1194 output = generate(*args, **kwargs)
1195 pass
/usr/local/lib/python3.10/dist-packages/torch/amp/autocast_mode.py in __enter__(self)
363 self.prev = torch.is_autocast_enabled()
364 self.prev_fastdtype = torch.get_autocast_gpu_dtype()
--> 365 torch.set_autocast_gpu_dtype(self.fast_dtype) # type: ignore[arg-type]
366 torch.set_autocast_enabled(self._enabled)
367 torch.autocast_increment_nesting()
TypeError: dtype must be a torch.dtype (got str)
What changes should I make for the code to work? Thanks in advance!