Example Code
<code> from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TextStreamer,
    pipeline,
)
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad.openai_tools import (
    format_to_openai_tool_messages,
)
from langchain_community.llms import HuggingFacePipeline
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import tool

name = "meta-llama/Meta-Llama-3-8B-Instruct"
auth_token = ""
tokenizer = AutoTokenizer.from_pretrained(name, use_auth_token=auth_token)
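# Quantize the model to 8-bit via bitsandbytes so the 8B model fits in GPU memory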
bnb_config = BitsAndBytesConfig(
load_in_8bit=True,
)
model_config = AutoConfig.from_pretrained(
name,
use_auth_token=auth_token,
    temperature=0.1,
)
model = AutoModelForCausalLM.from_pretrained(
name,
trust_remote_code=True,
config=model_config,
quantization_config=bnb_config,
device_map='auto',
use_auth_token=auth_token,
)
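# Stream generated tokens to stdout as they are produced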
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=4096, device_map="auto", streamer=streamer)
llm = HuggingFacePipeline(pipeline=pipe)
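# Minimal placeholder tool, just enough to reproduce the issue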
@tool
def some_custom_tool(input_string: str) -> str:
"""Executes some work and returns a success message if successfull else it return the error message"""
return "SUCCESS"
tools = [some_custom_tool]
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
f"""
You are an Assistant......
""",
),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
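# This is the call that raises the AttributeError below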
llm_with_tools = llm.bind_tools(tools)
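# LCEL agent: map the input, format intermediate steps into the scratchpad,
# then run prompt -> llm -> JSON output parser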
agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
),
}
| prompt
| llm
| JsonOutputParser()
)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, return_intermediate_steps=True)
</code>
Description
I am trying to bind a custom tool to the LLM, just as I would with ChatOpenAI, but I am getting the error below. It looks like bind_tools does not exist on HuggingFacePipeline. Is there a way to bind a custom tool to an LLM created from HuggingFacePipeline?
AttributeError: 'HuggingFacePipeline' object has no attribute 'bind_tools'
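For reference, a rough sketch of the kind of thing I was expecting, based on the ChatHuggingFace wrapper from the separate langchain-huggingface package, which appears to expose bind_tools. That package is not in my environment listed below, so this is an untested assumption on my part:
<code> # Untested sketch, assuming `pip install langchain-huggingface`:
# ChatHuggingFace is a chat-model wrapper that appears to implement bind_tools.
from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline

hf_llm = HuggingFacePipeline(pipeline=pipe)  # reuse `pipe` from the example above
chat_model = ChatHuggingFace(llm=hf_llm, model_id=name)
llm_with_tools = chat_model.bind_tools(tools)
</code>
Is something like ChatHuggingFace the intended way to get tool binding here, or should HuggingFacePipeline support bind_tools directly?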
System Info:
<code>langchain==0.2.6
langchain-community==0.2.6
langchain-core==0.2.11
langchain-openai==0.1.14
langchain-text-splitters==0.2.2
Python 3.10.13
</code>
I am running this on Kaggle with the GPU T4 x2 accelerator.