```python
@cl.on_message
async def main(message: cl.Message):
    counter = cl.user_session.get("counter", 0)
    if counter < 1:
        # First question: answer from the vector store via RetrievalQA
        chain = cl.user_session.get("chain")
        cb = cl.AsyncLangchainCallbackHandler(stream_final_answer=True)
        cb.answer_reached = True
        response = await chain.acall({'query': message.content}, callbacks=[cb])
        cl.user_session.set("counter", counter + 1)  # Increment the counter

        # Extract the result
        if isinstance(response, dict) and 'result' in response:
            res = response['result']
        else:
            res = response

        # Ensure the response is a string
        if not isinstance(res, str):
            res = str(res)

        # await cl.Message(content=res).send()
        combined_input = {'question': message.content}
        memory_response = {'result': res}
        await memory_retrieval.asave_context(inputs=combined_input, outputs=memory_response)

        # Debugging: print the conversation buffer (the context above was saved to memory_retrieval)
        print("Memory Conversation after saving context:")
        print(memory_conversation.buffer)
    else:
        # Use the LLM directly for subsequent questions
        llm = load_llm()
        conversation_prompt = set_conversation_prompt()
        llm_chain = ConversationChain(
            llm=llm,
            prompt=conversation_prompt,
            memory=memory_conversation,
            input_key='input',
            verbose=True,
        )
        cb = cl.AsyncLangchainCallbackHandler(stream_final_answer=True)
        cb.answer_reached = True

        # Get the history from memory and build the context
        previous_history = memory_conversation.load_memory_variables({}).get('history', '')
        print("Chat History:", previous_history)
        print("Type of Chat History:", type(previous_history))
        combined_input = {'history': previous_history, 'input': message.content}
        # # Use the conversation memory directly
        # combined_input = {'history': memory_conversation.buffer, 'input': message.content}
        response = await llm_chain.ainvoke(combined_input, config={'callbacks': [cb]})

        # If the response is a dictionary, extract the 'response' field
        if isinstance(response, dict) and 'response' in response:
            res = response['response']
        else:
            res = response  # Fall back to the raw response

        await cl.Message(content=res).send()
        memory_response = {'response': res}
        await memory_conversation.asave_context(inputs=combined_input, outputs=memory_response)

        # Debugging: print memory_conversation
        print("Memory Conversation after saving context:")
        print(memory_conversation.buffer)
```
I’ve been working on a first aid chatbot for a project, but I am stuck. As the handler above shows, I use RetrievalQA to look up a symptom in my database through a vector store (vectordb) and then try to continue the conversation with ConversationChain, but the answers are buggy despite my attempts to pass the memory along.
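For context, here is roughly how the chains and memories are wired together. This is a simplified sketch rather than my exact setup code: `vectordb` is the vector store mentioned above, `load_llm()` is my model loader, and the memory keys are the ones the handler assumes.

```python
from langchain.chains import RetrievalQA
from langchain.memory import ConversationBufferMemory

# Two separate memories, keyed to match the two chains in the handler
memory_retrieval = ConversationBufferMemory(input_key="question", output_key="result")
memory_conversation = ConversationBufferMemory(input_key="input", output_key="response")

# RetrievalQA over the symptom vector store, stored in the Chainlit session
# so the handler can fetch it with cl.user_session.get("chain")
chain = RetrievalQA.from_chain_type(
    llm=load_llm(),
    chain_type="stuff",
    retriever=vectordb.as_retriever(),
)
cl.user_session.set("chain", chain)
```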
I tried using ConversationalRetrievalChain instead, but I keep getting the error `Chat History is in the wrong format. Chat History type: .`
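As far as I can tell, ConversationalRetrievalChain expects `chat_history` to be a list of `(human, ai)` tuples or message objects, not the plain string that `ConversationBufferMemory.buffer` gives me, which may be why it rejects the format. A minimal, simplified sketch of that attempt (inside the message handler, with the same `load_llm()`/`vectordb` placeholders as above):

```python
from langchain.chains import ConversationalRetrievalChain

qa = ConversationalRetrievalChain.from_llm(
    llm=load_llm(),
    retriever=vectordb.as_retriever(),
)

chat_history = []  # expected format: a list of (human_message, ai_message) tuples
result = await qa.acall({"question": message.content, "chat_history": chat_history})
chat_history.append((message.content, result["answer"]))
```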
I also tried using SequentialChain to pass the output of RetrievalQA into ConversationChain, but the output keys of RetrievalQA do not match the input keys of ConversationChain; see the sketch after this paragraph.
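Concretely, RetrievalQA takes `query` and returns `result`, while ConversationChain takes `input` (plus `history` from its memory) and returns `response`, so SequentialChain cannot connect them directly. One possible adapter would be a TransformChain that renames the key in between; this is only an untested sketch reusing the `chain` and `llm_chain` objects from above, and I am not sure it is the right approach:

```python
from langchain.chains import SequentialChain, TransformChain

# Adapter chain: rename RetrievalQA's 'result' key to ConversationChain's 'input' key
rename = TransformChain(
    input_variables=["result"],
    output_variables=["input"],
    transform=lambda inputs: {"input": inputs["result"]},
)

pipeline = SequentialChain(
    chains=[chain, rename, llm_chain],
    input_variables=["query"],
    output_variables=["response"],
    verbose=True,
)
```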