So, my problem is that whenever I ask my chatbot a question, it answers based on the old vectors saved in Chroma DB, even after the source content has changed.
I have tried the LangChain indexing guide, but it did not work: https://python.langchain.com/docs/how_to/indexing/
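For context, this is roughly how I wired up the indexing API from that guide (a sketch; the record-manager namespace and the SQLite path are placeholders I picked), but retrieval still answered from the old vectors:

from langchain.indexes import SQLRecordManager, index
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
vector_store = Chroma(
    collection_name="tale_teller_123",  # placeholder collection name
    embedding_function=embeddings,
)

# The record manager tracks what has already been written, so re-runs
# update or delete stale entries instead of duplicating them.
record_manager = SQLRecordManager(
    "chroma/tale_teller_123", db_url="sqlite:///record_manager_cache.sql"
)
record_manager.create_schema()

index(
    texts,  # the freshly split documents
    record_manager,
    vector_store,
    cleanup="incremental",
    source_id_key="source",  # assumes each doc has metadata["source"]
)

Here is my current setup: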
import os

from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter


def my_chain(project_id, f_tag):
    # Load documents
    raw_documents = load_documents_from_db(project_id)
    if not raw_documents:
        raise ValueError(
            "No documents were loaded from the database. Please check the database connection and query.")

    # Load edges
    connections = load_edges_from_db(project_id)
    # if not connections:
    #     raise ValueError(
    #         "No connections were loaded from the database. Please check the database connection and query.")
    # print('edges', connections)

    # Split raw documents into chunks; without this step `texts` below
    # is undefined (chunk sizes here are just the values I'm using)
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    texts = text_splitter.split_documents(raw_documents)
    if not texts:
        raise ValueError(
            "No texts were created from the documents. Please check the document splitting logic.")

    # Use the document graph to enhance page content with node connections
    # (for now this just prints each node id while I debug)
    for doc in texts:
        node_id = doc.metadata.get('id')
        print("node_id", node_id)

    # Initialize embeddings and vector store with IDs
    embeddings = OpenAIEmbeddings(
        model="text-embedding-3-small", openai_api_key=os.getenv("OPENAI_API_KEY"))
    vector_store = Chroma.from_documents(
        texts,
        embeddings,
        collection_name=f"tale_teller_{project_id}",
        # ids=[doc.metadata['id'] for doc in texts]
    )
So, what I would like is this: whenever the user asks a question about the content, the answer should be based on the latest content rather than on the stale vectors saved in Chroma DB.
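What I think I need (a sketch, assuming every chunk carries a stable metadata['id'], as hinted by the commented-out ids argument above) is to overwrite the stale vectors for those IDs instead of building a fresh collection each run:

from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
vector_store = Chroma(
    collection_name=f"tale_teller_{project_id}",
    embedding_function=embeddings,
)

doc_ids = [doc.metadata["id"] for doc in texts]

# Drop any previously stored vectors for these IDs, then re-add the
# latest versions so retrieval always sees the current content.
vector_store.delete(ids=doc_ids)
vector_store.add_documents(texts, ids=doc_ids)

Is this the right pattern, or is there a better way to keep the Chroma collection in sync with the latest content?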