When I use LangChain's RAG method to search a local txt file, some content cannot be found, and every time I change chunk_size, the content that cannot be found is different. The content of the file is Chinese, and TextLoader successfully loads all of it.
<code>from langchain import hub
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain_community.llms import Ollama
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_core.prompts import PromptTemplate
import re

loader = TextLoader("knowledge.txt")
data = loader.load()

# Extract the quoted titles from the Chinese knowledge file.
with open("geometry-knowledges-chinese.txt", 'r', encoding='utf-8') as file:
    content = file.read()
lines = content.split('\n')
titles = []
for line in lines:
    match = re.search(r'"([^"]+)"', line)
    if match:
        titles.append(match.group(1))

model_name = "BAAI/bge-small-en"
model_kwargs = {"device": "cpu"}
encode_kwargs = {"normalize_embeddings": True}
hf = HuggingFaceBgeEmbeddings(model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs)

# Split
text_splitter = RecursiveCharacterTextSplitter(
    separators=[
        "\n\n",
        "\n",
        "\uff0e",  # Fullwidth full stop
        "\u3002",  # Ideographic full stop
    ],
    chunk_size=300, chunk_overlap=50, length_function=len, add_start_index=True)
all_splits = text_splitter.split_documents(data)

# Store splits
vectorstore = FAISS.from_documents(documents=all_splits, embedding=hf)

# RAG prompt
template = """Use the following context to answer the question.
If you don't know the answer, just say you don't know; don't try to make up an answer.
Don't add anything else.
{context}
Question: {question}
Helpful Answer:"""
QA_CHAIN_PROMPT = PromptTemplate(
    input_variables=["context", "question"],
    template=template,
)

# LLM
llm = Ollama(model="llama3.1")

# RetrievalQA
qa_chain = RetrievalQA.from_chain_type(
    llm,
    retriever=vectorstore.as_retriever(),
    chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
)

for title in titles:
    specific_question = f"Definition of {title}"
    result = qa_chain({"query": specific_question})
    print(f"Definition of {title}: {result['result']}")
</code>
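A quick way to see the symptom is to check which titles never appear in any chunk after splitting. This is a minimal diagnostic sketch, reusing the `titles` and `all_splits` variables from the code above:
<code># Diagnostic: list titles whose text does not appear in any chunk.
missing = [t for t in titles if not any(t in doc.page_content for doc in all_splits)]
print(f"{len(all_splits)} chunks; titles absent from every chunk: {missing}")
</code>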
How should I modify the code to allow the LLM to search for all the content?
Your code looks correct. I would advise three things to debug:
- Your chunking strategy may not be suitable for your dataset. Try playing around with the chunk size and overlap values to arrive at optimal retrieval.
- I can see you have two txt files. Ensure the vector store actually contains the data your queries need; you can query it directly, as shown in the sketch after the code below.
- Try the snippet below for now, and let me know what happens.
<code>qa_chain = RetrievalQA.from_chain_type(
    llm,
    retriever=vectorstore.as_retriever(
        search_type="mmr",
        search_kwargs={'k': 6, 'lambda_mult': 0.25}
    ),
    chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}
)
</code>
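For the second point, here is a minimal sketch (reusing the `vectorstore` and `titles` from your question) that queries the FAISS store directly, bypassing the LLM, so you can see whether the missing definitions are ever retrieved at all:
<code># Hypothetical diagnostic: inspect raw retrieval results without the LLM.
for title in titles[:5]:
    docs = vectorstore.similarity_search(f"Definition of {title}", k=4)
    print(title, "->", [d.page_content[:40] for d in docs])
</code>
If a definition never shows up here, the problem lies in chunking or embedding rather than in the LLM, and that is where to tune chunk_size and chunk_overlap.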