this is my code:
import os
from dotenv import load_dotenv
load_
dotenv()
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END
from langgraph.graph import Graph, MessagesState
from typing import Annotated, Any, Dict, Optional, List,Sequence, TypedDict
from langgraph.graph.message import add_messages
class AgentState(TypedDict):
# The `add_messages` function within the annotation defines
# *how* updates should be merged into the state.
messages: Annotated[list, add_messages]
def function1(state):
return {"messages": "Hi"}
def function2(state):
return {"messages": "Hello"}
def my_condition(state):
return "end"
workflow=StateGraph(AgentState)
workflow.add_node("agent", function1)
workflow.add_node("tool", function2)
workflow.add_edge('agent','tool')
workflow.set_entry_point("agent")
workflow.add_conditional_edges("agent", my_condition,{ "end": END})
app=workflow.compile()
print(app.invoke({"messages": "tell me about you"}))
In above code I want to END function at “function1” and get this result:
{‘messages’: [HumanMessage(content=’tell me about you’, id=’70a7cb55-4cb2-4d0b-9623-79cb06bcabf3′), HumanMessage(content=’Hi’, id=’d95bd56d-93b6-44b1-ae05-3449472d8463′)]}
But I am getting this below result:
{‘messages’: [HumanMessage(content=’tell me about you’, id=’70a7cb55-4cb2-4d0b-9623-79cb06bcabf3′), HumanMessage(content=’Hi’, id=’d95bd56d-93b6-44b1-ae05-3449472d8463′), HumanMessage(content=’Hello’, id=’7ea9ab2a-635f-46eb-8f17-d9a6af79688e’)]}