Here is the entire code for reproducing the problem:
<code>import logging
import sys
import time

import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

# from llm_related import get_prompt  # get_prompt is defined inline below so the script is self-contained

# Configure the root logger to print to stdout.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('[%(levelname)s][%(asctime)s.%(msecs)03d] '
                              '[%(filename)s:%(lineno)d]: %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)


def get_prompt(tokenizer, user_query, system_prompt=None):
    """Build a chat-formatted prompt string from the user query and optional system prompt."""
    if system_prompt is None:
        conversation = []
    else:
        conversation = [
            {'role': 'system', 'content': system_prompt}
        ]
    if not isinstance(user_query, list):
        user_query = [user_query]
    # Treat a list of messages as alternating user/assistant turns.
    for idx, item in enumerate(user_query):
        if idx % 2 == 0:
            conversation.append({'role': 'user', 'content': item})
        else:
            conversation.append({'role': 'assistant', 'content': item})
    prompt = tokenizer.apply_chat_template(
        conversation,
        tokenize=False,  # return a plain string (return_tensors would be ignored here)
        add_generation_prompt=True
    )
    return prompt


user_query = "Can you generate a mathematical formula based on this problem?"
system_prompt = "You are HumanWriterGPT, a version of ChatGPT that has been customized for a specific use case. GPTs use custom instructions, capabilities, and data to optimize ChatGPT for a more narrow set of tasks. You yourself are a GPT created by a user, and your name is HumanWriterGPT. Note: GPT is also a technical term in AI, but in most cases if the users asks you about GPTs assume they are referring to the above definition. Here are instructions from the user outlining your goals and how you should respond: HumanWriterGPT is designed to generate SEO-optimized, human-like articles based on provided keywords, incorporating a friendly yet professional tone. This GPT specializes in tailoring articles to specific industries using user-uploaded proprietary data such as manuals or guides. It leverages recent updates from uploaded news articles or research papers to remain up-to-date. HumanWriterGPT offers personalization by incorporating unique characters, settings, or scenarios from provided descriptions. For clarity, it requests additional information when needed. It is skilled in providing detailed product insights, referencing online sources, and structuri"

# model_id = "mistralai/Mistral-7B-Instruct-v0.3"
model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Load the model in 8-bit on GPU 0; CPU offload should be ruled out by max_memory.
model_kwargs = {
    "device_map": "auto",
    "max_memory": {0: "16GB", "cpu": "0GB"}
}
quantization_config = BitsAndBytesConfig(
    load_in_8bit=True
)
model_kwargs["quantization_config"] = quantization_config
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    **model_kwargs
)

_transformers_inference_pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer
)

prompt = get_prompt(tokenizer, user_query, system_prompt)
len_prompt = len(prompt)
generate_kwargs = {
    'max_new_tokens': 1024,
    "pad_token_id": tokenizer.eos_token_id
}

# Time the generation and compute tokens per second from the generated text.
begin_time = time.perf_counter()
sequences = _transformers_inference_pipeline(
    prompt,
    do_sample=True,
    **generate_kwargs
)
end_time = time.perf_counter()

response = sequences[0].get("generated_text")[len_prompt:]
tokens = tokenizer.encode(response)
duration = end_time - begin_time
token_per_second = len(tokens) / duration
logging.info(f'Generated {len(tokens)} tokens in {round(duration, 2)}s. '
             f'Tokens per second: {round(token_per_second, 2)}')
</code>
I used a single V100 with 16 GB of memory to run Llama 3.1 8B Instruct in 8-bit mode for inference.
I expected throughput in the tens, possibly hundreds, of tokens per second, but my measurement comes out at only around 3 to 4 tokens per second.
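To rule out overhead from the pipeline wrapper and from re-encoding the generated text, the generation can also be timed by calling model.generate directly. This is only a minimal sketch that reuses model, tokenizer, and prompt from the script above; it assumes the whole model ended up on a single GPU so that model.device is meaningful.
<code>import time
import torch

# The chat template already adds the special tokens, so skip them here.
inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to(model.device)

start = time.perf_counter()
with torch.no_grad():
    output = model.generate(
        **inputs,
        max_new_tokens=256,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
elapsed = time.perf_counter() - start

# Count only the newly generated tokens, not the prompt tokens.
new_tokens = output.shape[-1] - inputs["input_ids"].shape[-1]
print(f"{new_tokens} new tokens in {elapsed:.2f}s ({new_tokens / elapsed:.2f} tokens/s)")
</code>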
As for why it is so slow, I suspect that part of the model is being loaded onto the CPU. However, I am not sure, because I already specified <code>"max_memory": {0: "16GB", 'cpu': "0GB"}</code>, which should have prevented that.
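One way to verify this suspicion is to inspect where accelerate actually placed each module. A minimal sketch, assuming the model was loaded with device_map="auto" as above so that model.hf_device_map is populated:
<code>from collections import Counter

import torch

# How many modules landed on each device, e.g. Counter({0: N}) if everything is on GPU 0.
print(Counter(model.hf_device_map.values()))

# Any entry mapped to "cpu" or "disk" means that part of the model is offloaded.
offloaded = [name for name, dev in model.hf_device_map.items() if dev in ("cpu", "disk")]
print("Offloaded modules:", offloaded)

# Rough cross-check: the quantized model's footprint vs. what is actually allocated on the GPU.
print(f"Model footprint: {model.get_memory_footprint() / 1e9:.2f} GB")
print(f"CUDA allocated:  {torch.cuda.memory_allocated(0) / 1e9:.2f} GB")
</code>
If any module shows up as "cpu" or "disk", or if the CUDA-allocated memory is much smaller than the model footprint, offloading would be the likely cause of the slowdown.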