I would like to ask about using LangChain for LLMs in Python.
I need multiple inputs with chatting history to run the model, so I tried to use RunnableWithMessageHistory but got an error.
Error in RootListenersTracer.on_chain_end callback: KeyError('input')
{'output1': 'future', 'output2': 'past'}
<code>llm = ChatOpenAI(
model="gpt-4o",
temperature=0,
max_tokens=None,
timeout=None,
max_retries=2,
api_key=api_info['api_key'],
organization=api_info['organization']
# base_url="...",
# other params...
)
store = {}
def get_session_history(session_ids):
if session_ids not in store:
store[session_ids] = ChatMessageHistory()
return store[session_ids]
response_schemas = [
ResponseSchema(name="output1", description="translates first word to English"),
ResponseSchema(name="output2", description="translates next word to English")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
prompt_template_1 = ChatPromptTemplate(
messages=[
SystemMessagePromptTemplate.from_template(
"Output MUST be JSON format"
),
MessagesPlaceholder(variable_name="chat_history"),
HumanMessagePromptTemplate.from_template(
"translates {korean_word1} and {korean_word2} to English."
"n{format_instructions}"
)
],
input_variables=["korean_word1", "korean_word2"],
partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
chain1 = prompt_template_1 | llm | output_parser
chain_with_history = RunnableWithMessageHistory(
chain1,
get_session_history,
history_messages_key="chat_history",
)
word1 = '미래'
word2 = '과거'
result = chain_with_history.invoke(
{"korean_word1": word1, "korean_word2": word2},
config={"configurable": {"session_id": "abc123"}},
)
</code>
<code>llm = ChatOpenAI(
model="gpt-4o",
temperature=0,
max_tokens=None,
timeout=None,
max_retries=2,
api_key=api_info['api_key'],
organization=api_info['organization']
# base_url="...",
# other params...
)
store = {}
def get_session_history(session_ids):
if session_ids not in store:
store[session_ids] = ChatMessageHistory()
return store[session_ids]
response_schemas = [
ResponseSchema(name="output1", description="translates first word to English"),
ResponseSchema(name="output2", description="translates next word to English")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
prompt_template_1 = ChatPromptTemplate(
messages=[
SystemMessagePromptTemplate.from_template(
"Output MUST be JSON format"
),
MessagesPlaceholder(variable_name="chat_history"),
HumanMessagePromptTemplate.from_template(
"translates {korean_word1} and {korean_word2} to English."
"n{format_instructions}"
)
],
input_variables=["korean_word1", "korean_word2"],
partial_variables={"format_instructions": output_parser.get_format_instructions()}
)
chain1 = prompt_template_1 | llm | output_parser
chain_with_history = RunnableWithMessageHistory(
chain1,
get_session_history,
history_messages_key="chat_history",
)
word1 = '미래'
word2 = '과거'
result = chain_with_history.invoke(
{"korean_word1": word1, "korean_word2": word2},
config={"configurable": {"session_id": "abc123"}},
)
</code>
llm = ChatOpenAI(
    model="gpt-4o",
    temperature=0,
    max_tokens=None,
    timeout=None,
    max_retries=2,
    api_key=api_info['api_key'],
    organization=api_info['organization'],
    # base_url="...",
    # other params...
)

store = {}

def get_session_history(session_id):
    """Return the ChatMessageHistory for session_id, creating it on first use."""
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]

response_schemas = [
    ResponseSchema(name="output1", description="translates first word to English"),
    ResponseSchema(name="output2", description="translates next word to English"),
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)

prompt_template_1 = ChatPromptTemplate(
    messages=[
        SystemMessagePromptTemplate.from_template(
            "Output MUST be JSON format"
        ),
        MessagesPlaceholder(variable_name="chat_history"),
        HumanMessagePromptTemplate.from_template(
            "translates {korean_word1} and {korean_word2} to English."
            "\n{format_instructions}"  # was "n{...}" — missing backslash in newline escape
        ),
    ],
    input_variables=["korean_word1", "korean_word2"],
    partial_variables={"format_instructions": output_parser.get_format_instructions()},
)

# FIX for KeyError('input'): when the wrapped chain's input/output is a dict and
# no input_messages_key / output_messages_key is given, RunnableWithMessageHistory
# falls back to the keys "input" and "output", which don't exist here.
# So: (1) wrap only prompt | llm — its output is an AIMessage, which the history
# can store directly; (2) name ONE input key to record as the human message
# (input_messages_key accepts a single key, not a list); (3) apply the
# structured parser AFTER the history wrapper, so `result` is still the dict.
chain1 = prompt_template_1 | llm

chain_with_history = RunnableWithMessageHistory(
    chain1,
    get_session_history,
    input_messages_key="korean_word1",  # must be a single key name
    history_messages_key="chat_history",
)

word1 = '미래'
word2 = '과거'
result = (chain_with_history | output_parser).invoke(
    {"korean_word1": word1, "korean_word2": word2},
    config={"configurable": {"session_id": "abc123"}},
)
I also tried using 'input_messages_key' in RunnableWithMessageHistory, but that failed too, because input_messages_key allows only one key, not multiple keys.
<code>chain_with_history = RunnableWithMessageHistory(
chain1,
get_session_history,
input_messages_key=["korean_word1", "korean_word2"], // Error Here
history_messages_key="chat_history",
)
</code>
<code>chain_with_history = RunnableWithMessageHistory(
chain1,
get_session_history,
input_messages_key=["korean_word1", "korean_word2"], // Error Here
history_messages_key="chat_history",
)
</code>
chain_with_history = RunnableWithMessageHistory(
    chain1,
    get_session_history,
    # input_messages_key must be a single key name (str), not a list —
    # pick the one input whose value should be stored as the human message.
    input_messages_key="korean_word1",
    history_messages_key="chat_history",
)
Please help me write the correct code.