I am building a tool wherein the system will ask certain questions to a user through an agent executor and in the end the inputs are compiled into a json. Now I am writing a prompt to generate a document based on the user’s inputs but I am unable to initiate because it says it lacks a list of BaseMessages
<code>import json
import chainlit as cl
from chainlit.sync import run_sync
from docx import Document
from document import document_types
from dotenv import load_dotenv
from langchain.agents import initialize_agent, AgentType
from langchain_core.prompt_values import PromptValue
from langchain.tools import BaseTool
from langchain_openai import ChatOpenAI
load_dotenv()
class HumanInputChainlit(BaseTool):
"""Tool that adds the capability to ask user for input."""
name = "human"
description = (
"You can ask a human for guidance when you think you got stuck or you are not sure what to do next. "
"The input can be concise or short, and you should frame the context intelligently to continue. "
"The input should be a question for the human."
)
def _run(self, query: str, run_manager=None) -> str:
"""Use the Human input tool."""
res = run_sync(cl.AskUserMessage(content=query).send())
return res["content"]
async def _arun(self, query: str, run_manager=None) -> str:
"""Use the Human input tool."""
res = await cl.AskUserMessage(content=query).send()
return res["output"]
@cl.on_chat_start
def start():
llm = ChatOpenAI(temperature=0, streaming=True, model_name="gpt-3.5-turbo")
tools = [
HumanInputChainlit()
]
agent = initialize_agent(
tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True, handle_parsing_errors=True
)
cl.user_session.set("agent", agent)
cl.user_session.set("document_type", None)
cl.user_session.set("answers", {})
cl.user_session.set("current_question_index", None)
@cl.on_message
async def main(message: cl.Message):
agent = cl.user_session.get("agent")
document_type = cl.user_session.get("document_type")
answers = cl.user_session.get("answers")
if document_type is None:
document_type = message.content.strip()
if document_type not in document_types:
await cl.Message(content="Invalid document type. Please try again.").send()
return
cl.user_session.set("document_type", document_type)
first_question = document_types[document_type][0]
cl.user_session.set("current_question_index", 0)
res = await agent.arun(first_question)
await cl.Message(content=res).send()
else:
current_question_index = cl.user_session.get("current_question_index")
question = document_types[document_type][current_question_index]
answers[question] = message.content.strip()
cl.user_session.set("answers", answers)
current_question_index += 1
if current_question_index < len(document_types[document_type]):
next_question = document_types[document_type][current_question_index]
cl.user_session.set("current_question_index", current_question_index)
res = await agent.arun(next_question)
await cl.Message(content=res).send()
else:
# Store answers in a JSON file
json_file_path = f"{document_type.replace(' ', '_')}_answers.json"
with open(json_file_path, "w") as json_file:
json.dump(answers, json_file)
await cl.Message(content="All questions answered. Generating the document...").send()
await generate_document(document_type, json_file_path)
async def generate_document(document_type, json_file_path):
with open(json_file_path, "r") as json_file:
answers = json.load(json_file)
llm = ChatOpenAI(temperature=0, streaming=True, model_name="gpt-3.5-turbo")
prompt = f"Generate a comprehensive professional {document_type} with proper headers based on the following answers:nn"
for question, answer in answers.items():
prompt += f"{question}nAnswer: {answer}nn"
ai_message = PromptValue(text=prompt)
result = await llm.invoke(ai_message)
response = result.content
doc = Document()
doc.add_heading(f"{document_type}", level=1)
doc.add_paragraph(response)
output_file_path = f"{document_type.replace(' ', '_')}.docx"
doc.save(output_file_path)
with open(output_file_path, "rb") as f:
bytes_data = f.read()
await cl.File(
name=output_file_path,
content=bytes_data,
mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
).send()
await cl.Message(content="Document generation completed. You can download the file.").send()
cl.user_session.set("document_type", None)
cl.user_session.set("answers", {})
cl.user_session.set("current_question_index", None)
</code>
<code>import json
import chainlit as cl
from chainlit.sync import run_sync
from docx import Document
from document import document_types
from dotenv import load_dotenv
from langchain.agents import initialize_agent, AgentType
from langchain_core.prompt_values import PromptValue
from langchain.tools import BaseTool
from langchain_openai import ChatOpenAI
load_dotenv()
class HumanInputChainlit(BaseTool):
"""Tool that adds the capability to ask user for input."""
name = "human"
description = (
"You can ask a human for guidance when you think you got stuck or you are not sure what to do next. "
"The input can be concise or short, and you should frame the context intelligently to continue. "
"The input should be a question for the human."
)
def _run(self, query: str, run_manager=None) -> str:
"""Use the Human input tool."""
res = run_sync(cl.AskUserMessage(content=query).send())
return res["content"]
async def _arun(self, query: str, run_manager=None) -> str:
"""Use the Human input tool."""
res = await cl.AskUserMessage(content=query).send()
return res["output"]
@cl.on_chat_start
def start():
llm = ChatOpenAI(temperature=0, streaming=True, model_name="gpt-3.5-turbo")
tools = [
HumanInputChainlit()
]
agent = initialize_agent(
tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True, handle_parsing_errors=True
)
cl.user_session.set("agent", agent)
cl.user_session.set("document_type", None)
cl.user_session.set("answers", {})
cl.user_session.set("current_question_index", None)
@cl.on_message
async def main(message: cl.Message):
agent = cl.user_session.get("agent")
document_type = cl.user_session.get("document_type")
answers = cl.user_session.get("answers")
if document_type is None:
document_type = message.content.strip()
if document_type not in document_types:
await cl.Message(content="Invalid document type. Please try again.").send()
return
cl.user_session.set("document_type", document_type)
first_question = document_types[document_type][0]
cl.user_session.set("current_question_index", 0)
res = await agent.arun(first_question)
await cl.Message(content=res).send()
else:
current_question_index = cl.user_session.get("current_question_index")
question = document_types[document_type][current_question_index]
answers[question] = message.content.strip()
cl.user_session.set("answers", answers)
current_question_index += 1
if current_question_index < len(document_types[document_type]):
next_question = document_types[document_type][current_question_index]
cl.user_session.set("current_question_index", current_question_index)
res = await agent.arun(next_question)
await cl.Message(content=res).send()
else:
# Store answers in a JSON file
json_file_path = f"{document_type.replace(' ', '_')}_answers.json"
with open(json_file_path, "w") as json_file:
json.dump(answers, json_file)
await cl.Message(content="All questions answered. Generating the document...").send()
await generate_document(document_type, json_file_path)
async def generate_document(document_type, json_file_path):
with open(json_file_path, "r") as json_file:
answers = json.load(json_file)
llm = ChatOpenAI(temperature=0, streaming=True, model_name="gpt-3.5-turbo")
prompt = f"Generate a comprehensive professional {document_type} with proper headers based on the following answers:nn"
for question, answer in answers.items():
prompt += f"{question}nAnswer: {answer}nn"
ai_message = PromptValue(text=prompt)
result = await llm.invoke(ai_message)
response = result.content
doc = Document()
doc.add_heading(f"{document_type}", level=1)
doc.add_paragraph(response)
output_file_path = f"{document_type.replace(' ', '_')}.docx"
doc.save(output_file_path)
with open(output_file_path, "rb") as f:
bytes_data = f.read()
await cl.File(
name=output_file_path,
content=bytes_data,
mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
).send()
await cl.Message(content="Document generation completed. You can download the file.").send()
cl.user_session.set("document_type", None)
cl.user_session.set("answers", {})
cl.user_session.set("current_question_index", None)
</code>
import json
import chainlit as cl
from chainlit.sync import run_sync
from docx import Document
from document import document_types
from dotenv import load_dotenv
from langchain.agents import initialize_agent, AgentType
from langchain_core.prompt_values import PromptValue
from langchain.tools import BaseTool
from langchain_openai import ChatOpenAI
load_dotenv()
class HumanInputChainlit(BaseTool):
"""Tool that adds the capability to ask user for input."""
name = "human"
description = (
"You can ask a human for guidance when you think you got stuck or you are not sure what to do next. "
"The input can be concise or short, and you should frame the context intelligently to continue. "
"The input should be a question for the human."
)
def _run(self, query: str, run_manager=None) -> str:
"""Use the Human input tool."""
res = run_sync(cl.AskUserMessage(content=query).send())
return res["content"]
async def _arun(self, query: str, run_manager=None) -> str:
"""Use the Human input tool."""
res = await cl.AskUserMessage(content=query).send()
return res["output"]
@cl.on_chat_start
def start():
llm = ChatOpenAI(temperature=0, streaming=True, model_name="gpt-3.5-turbo")
tools = [
HumanInputChainlit()
]
agent = initialize_agent(
tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True, handle_parsing_errors=True
)
cl.user_session.set("agent", agent)
cl.user_session.set("document_type", None)
cl.user_session.set("answers", {})
cl.user_session.set("current_question_index", None)
@cl.on_message
async def main(message: cl.Message):
agent = cl.user_session.get("agent")
document_type = cl.user_session.get("document_type")
answers = cl.user_session.get("answers")
if document_type is None:
document_type = message.content.strip()
if document_type not in document_types:
await cl.Message(content="Invalid document type. Please try again.").send()
return
cl.user_session.set("document_type", document_type)
first_question = document_types[document_type][0]
cl.user_session.set("current_question_index", 0)
res = await agent.arun(first_question)
await cl.Message(content=res).send()
else:
current_question_index = cl.user_session.get("current_question_index")
question = document_types[document_type][current_question_index]
answers[question] = message.content.strip()
cl.user_session.set("answers", answers)
current_question_index += 1
if current_question_index < len(document_types[document_type]):
next_question = document_types[document_type][current_question_index]
cl.user_session.set("current_question_index", current_question_index)
res = await agent.arun(next_question)
await cl.Message(content=res).send()
else:
# Store answers in a JSON file
json_file_path = f"{document_type.replace(' ', '_')}_answers.json"
with open(json_file_path, "w") as json_file:
json.dump(answers, json_file)
await cl.Message(content="All questions answered. Generating the document...").send()
await generate_document(document_type, json_file_path)
async def generate_document(document_type, json_file_path):
with open(json_file_path, "r") as json_file:
answers = json.load(json_file)
llm = ChatOpenAI(temperature=0, streaming=True, model_name="gpt-3.5-turbo")
prompt = f"Generate a comprehensive professional {document_type} with proper headers based on the following answers:nn"
for question, answer in answers.items():
prompt += f"{question}nAnswer: {answer}nn"
ai_message = PromptValue(text=prompt)
result = await llm.invoke(ai_message)
response = result.content
doc = Document()
doc.add_heading(f"{document_type}", level=1)
doc.add_paragraph(response)
output_file_path = f"{document_type.replace(' ', '_')}.docx"
doc.save(output_file_path)
with open(output_file_path, "rb") as f:
bytes_data = f.read()
await cl.File(
name=output_file_path,
content=bytes_data,
mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
).send()
await cl.Message(content="Document generation completed. You can download the file.").send()
cl.user_session.set("document_type", None)
cl.user_session.set("answers", {})
cl.user_session.set("current_question_index", None)
New contributor
Vishwanath U is a new contributor to this site. Take care in asking for clarification, commenting, and answering.
Check out our Code of Conduct.