I am trying to get LangChain Bedrock and Anthropic to use a tool/function I have defined in my Python file. The model does kick off the right tool use, and the response reports an output_tokens count, but no tool output comes back.
My first query correctly triggers the get_weather tool but returns no output.
My second query, as expected, does not trigger any tool usage and returns an answer.
Code:
import os    # access to environment variables
import json
import boto3
from dotenv import find_dotenv, load_dotenv  # read API keys from .env (pip install python-dotenv)
from langchain_community.llms import Bedrock  # imported for LangChain use, not used in the calls below

dotenv_path = find_dotenv()  # find_dotenv walks up the parent path to locate the .env file
load_dotenv(dotenv_path)     # now the keys defined in .env are available via os.environ

aws_region = "us-east-1"
model_id = "anthropic.claude-3-sonnet-20240229-v1:0"

model = boto3.client(service_name="bedrock-runtime", region_name=aws_region)

# Define a custom tool
def get_weather(input: str) -> str:
    # Replace this with your actual tool implementation
    return f"The weather in {input} is sunny."
# Define the tool input schema
tool_schema = {
    "type": "object",
    "properties": {
        "location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"}
    },
    "required": ["location"],
}

# Define the tool list passed to the model
tool = [
    {
        "name": "get_weather",
        "description": "Get the current weather in a given location",
        "input_schema": tool_schema,
    },
]

# Another tool list (not used in the request below)
tools = [
    {"name": "get_weather", "description": "Get the current weather in a given location"},
    # {"name": "get_temperature", "description": "Get the current temperature in a given location"}
]
# Build the request body and invoke the model
def query(prompt):
    body = json.dumps({
        "max_tokens": 256,
        "messages": [{"role": "user", "content": prompt}],
        "anthropic_version": "bedrock-2023-05-31",
        "tools": tool,
    })
    response = model.invoke_model(body=body, modelId=model_id)
    print("------------------------------------")
    print("q: " + prompt)
    # The response body is a StreamingBody; read and decode its contents
    streaming_body = response["body"]
    s = streaming_body.read().decode("utf-8")
    sJson = json.loads(s)
    # print("a: " + json.dumps(sJson["content"], indent=4))
    print(json.dumps(sJson, indent=4))

query("What is the weather like in San Francisco?")
query("What is the capital of India?")
Output:
------------------------------------
q: What is the weather like in San Francisco?
{
"id": "msg_bdrk_01KCn2u3P2f2WCiQ4yuvkpNL",
"type": "message",
"role": "assistant",
"model": "claude-3-sonnet-20240229",
"content": [
{
"type": "text",
"text": "Okay, let's get the current weather for San Francisco:"
},
{
"type": "tool_use",
"id": "toolu_bdrk_01J6xY1kfNA1dVMbkqCpAp3K",
"name": "get_weather",
"input": {
"location": "San Francisco, CA"
}
}
],
"stop_reason": "tool_use",
"stop_sequence": null,
"usage": {
"input_tokens": 249,
"output_tokens": 70
}
}
------------------------------------
q: What is the capital of India?
{
"id": "msg_bdrk_011qdVBM8cAq69Z7w7TnVsMs",
"type": "message",
"role": "assistant",
"model": "claude-3-sonnet-20240229",
"content": [
{
"type": "text",
"text": "The capital of India is New Delhi."
}
],
"stop_reason": "end_turn",
"stop_sequence": null,
"usage": {
"input_tokens": 248,
"output_tokens": 11
}
}
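From reading the Anthropic tool-use docs, my understanding is that stop_reason "tool_use" means the model is waiting for me to run the tool myself and send the result back in a follow-up request as a tool_result content block; it never executes get_weather on its own. Below is a minimal sketch of what I think that second round trip looks like. The follow_up_with_tool_result helper and the way I replay the first response are my own assumptions, not code I have working yet:

# Sketch: run the requested tool locally and send the result back to Claude
def follow_up_with_tool_result(prompt, first_response_json):
    # Find the tool_use block the model returned
    tool_use = next(
        block for block in first_response_json["content"] if block["type"] == "tool_use"
    )
    # Execute my local tool with the arguments the model chose
    tool_output = get_weather(tool_use["input"]["location"])

    body = json.dumps({
        "max_tokens": 256,
        "anthropic_version": "bedrock-2023-05-31",
        "tools": tool,
        "messages": [
            {"role": "user", "content": prompt},
            # Replay the assistant turn that requested the tool
            {"role": "assistant", "content": first_response_json["content"]},
            # Supply the tool result, keyed by the tool_use id
            {
                "role": "user",
                "content": [
                    {
                        "type": "tool_result",
                        "tool_use_id": tool_use["id"],
                        "content": tool_output,
                    }
                ],
            },
        ],
    })
    response = model.invoke_model(body=body, modelId=model_id)
    return json.loads(response["body"].read().decode("utf-8"))

Is a second invoke_model call like this what is expected, or should the tool output already be present in the first response?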