This program implements a chatbot that answers questions using information scraped from a website. It first collects the links on a specific page and saves them, then fetches and processes the content of each linked page, including text and images. The processed content is stored in a FAISS vector store and used to answer user queries through an AI assistant.
I want to add a reference section at the end of the AI assistant's answer that cites the website the information came from. So far this hasn't worked correctly: when I added the source URLs to the context alongside the page text and asked the model to cite the URLs of its sources, the same URLs were printed for every question. What can I do so that the correct source URL is cited for each specific answer?
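Roughly, that attempt prefixed each retrieved chunk with its source URL before building the context. Here is a simplified, runnable reconstruction of it (the sample documents and the question are stand-ins, not my real data):

from langchain_core.documents import Document

# Stand-ins for what the retriever returns at query time
relevant_docs = [
    Document(page_content="Some page text...", metadata={"source": "https://example/page-1"}),
    Document(page_content="Other page text...", metadata={"source": "https://example/page-2"}),
]
user_input = "A sample question"

# Each chunk carries its own source URL into the context
context = "\n\n".join(
    f"Source: {doc.metadata['source']}\n{doc.page_content}" for doc in relevant_docs
)
augmented_user_input = (
    f"Context: {context}\n\nQuestion: {user_input}\n\n"
    "End your answer with a References section citing the URLs of the sources you used."
)
print(augmented_user_input)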
Here is some of my code:
# Imports (paths shown for recent LangChain versions; adjust to yours)
import os
import pickle
import time
from concurrent.futures import ThreadPoolExecutor

import requests
import streamlit as st
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.vectorstores import FAISS
# Embeddings and Chat are provider-specific classes; their import is omitted here

# API key
os.environ['API_KEY'] = ''
# Function to fetch URLs from the website and save them to urls.txt
def fetch_urls():
    driver_path = '/usr/local/bin/chromedriver'
    service = Service(driver_path)
    driver = webdriver.Chrome(service=service)
    url = 'https://www.example'
    driver.get(url)
    time.sleep(5)  # wait for dynamically loaded content to render
    links = driver.find_elements(By.TAG_NAME, 'a')
    urls = []
    for link in links:
        href = link.get_attribute('href')
        if href and href.startswith('https://example/'):
            urls.append(href)
    driver.quit()
    with open("urls.txt", "w") as file:
        for url in urls:
            file.write(f"{url}\n")
    return urls
# Function to fetch and extract text content and images from a specific div class on a website using Requests
def fetch_website_content(url):
    try:
        response = requests.get(url)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        # Find the specific div with class 'page-body-wrap'
        page_body_wrap = soup.find('div', class_='page-body-wrap')
        if not page_body_wrap:
            st.warning(f"No content found in div 'page-body-wrap' on {url}.")
            return None, [], url  # return the URL along with content and images
        # Extract text content from the div
        text_content = page_body_wrap.get_text(separator='\n\n').strip()
        # Extract image URLs
        image_urls = []
        images = page_body_wrap.find_all('img')
        for img in images:
            src = img.get('src')
            if src:
                image_urls.append(src)
        return text_content, image_urls, url
    except Exception as e:
        st.error(f"Error fetching {url}: {str(e)}")
        return None, [], url
# Main function
def main():
    # Fetch URLs from the website
    website_urls = fetch_urls()
    if not website_urls:
        st.error("No URLs fetched from the website.")
        return
    documents = []
    # Fetch content from each website in parallel
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(fetch_website_content, url) for url in website_urls]
        for future in futures:
            text_content, image_urls, fetched_url = future.result()
            if text_content:
                documents.append(Document(
                    page_content=text_content,
                    metadata={"source": fetched_url, "images": image_urls},
                ))
    if not documents:
        st.error("No content fetched from the websites.")
        return
    # Load the vector store if it exists, otherwise create and save it
    # (note: when the pickle already exists, the freshly fetched documents are not added to it)
    vector_store_path = "vectorstore.pkl"
    if os.path.exists(vector_store_path) and os.path.getsize(vector_store_path) > 0:
        with open(vector_store_path, "rb") as f:
            vectorstore = pickle.load(f)
    else:
        document_embedder = Embeddings(model="name", model_type="passage")
        vectorstore = FAISS.from_documents(documents, document_embedder)
        with open(vector_store_path, "wb") as f:
            pickle.dump(vectorstore, f)
        st.success("Vector store created successfully.")
    # Initialize models
    llm = Chat(model="name")
    prompt_template = ChatPromptTemplate.from_messages(
        [("system", "You are a helpful AI Assistant..."), ("user", "{input}")]
    )
    chain = prompt_template | llm | StrOutputParser()
    # Keep the chat history in session state
    if "messages" not in st.session_state:
        st.session_state.messages = []
    # Input form
    with st.form(key='chat_form', clear_on_submit=True):
        user_input = st.text_input("Ask your question:", key="user_input")
        submitted = st.form_submit_button("Send")
    # Handle user input and generate a response
    if submitted and user_input and vectorstore is not None:
        st.session_state.messages.insert(0, {"role": "user", "content": user_input})
        # Retrieve relevant documents from the vector store
        retriever = vectorstore.as_retriever()
        relevant_docs = retriever.get_relevant_documents(user_input)
        # Check for relevance
        if not relevant_docs:
            st.warning("No relevant documents found for the query.")
            return
        # Combine the relevant texts into the context
        # (this currently drops doc.metadata["source"], so the model never sees the URLs)
        context = "\n\n".join(doc.page_content for doc in relevant_docs)
        augmented_user_input = f"Context: {context}\n\nQuestion: {user_input}"
        # Invoke the AI assistant with the augmented input
        response = chain.invoke({"input": augmented_user_input})
        st.session_state.messages.insert(0, {"role": "assistant", "content": response})
        st.experimental_rerun()
    # Display chat messages (newest first, since they are inserted at index 0)
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

# Run the main function
if __name__ == "__main__":
    main()
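For clarity, the kind of response I am aiming for looks roughly like this (the URL is made up):

<answer text generated from the retrieved pages>

References:
- https://example/the-page-the-answer-came-from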