I’m new to LLMs and I’m running into problems that I have no clue how to solve or what causes them.
One of them is that as_retriever() is not recognized by VSCode, if that makes sense: it just shows up in plain white, with no highlighting.
The other problem is the error you can see below:
ValidationError: 1 validation error for StuffDocumentsChain __root__ document_variable_name context was not found in llm_chain input_variables: ['', 'question'] (type=value_error)
Traceback:
File "C:UsersDELLAppDataLocalProgramsPythonPython310libsite-packagesstreamlitruntimescriptrunnerscript_runner.py", line 584, in _run_script
exec(code, module.__dict__)
File "C:UsersDELLOneDriveBureaubladRamroop AIapp.py", line 70, in <module>
response = final_result(prompt)
File "C:UsersDELLOneDriveBureaubladRamroop AIapp.py", line 55, in final_result
qa_result = qa_bot()
File "C:UsersDELLOneDriveBureaubladRamroop AIapp.py", line 49, in qa_bot
qa = retrieval_qa_chain(llm, qa_prompt, db)
File "C:UsersDELLOneDriveBureaubladRamroop AIapp.py", line 33, in retrieval_qa_chain
qa_chain = RetrievalQA.from_chain_type(
File "C:UsersDELLAppDataLocalProgramsPythonPython310libsite-packageslangchainchainsretrieval_qabase.py", line 105, in from_chain_type
combine_documents_chain = load_qa_chain(
File "C:UsersDELLAppDataLocalProgramsPythonPython310libsite-packageslangchainchainsquestion_answering__init__.py", line 249, in load_qa_chain
return loader_mapping[chain_type](
File "C:UsersDELLAppDataLocalProgramsPythonPython310libsite-packageslangchainchainsquestion_answering__init__.py", line 81, in _load_stuff_chain
return StuffDocumentsChain(
File "C:UsersDELLAppDataLocalProgramsPythonPython310libsite-packageslangchain_coreloadserializable.py", line 120, in __init__
super().__init__(**kwargs)
File "C:UsersDELLAppDataLocalProgramsPythonPython310libsite-packagespydanticv1main.py", line 341, in __init__
raise validation_error
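From the last line of the error, my understanding is that the "stuff" chain looks for an input variable named context in the prompt (that seems to be where it stuffs the retrieved documents), while mine apparently only declares '' and question. Below is a minimal sketch of what I assume the prompt is supposed to look like; the placeholder names are my guess from the error message and I haven't been able to verify this yet:

from langchain_core.prompts import PromptTemplate

# My assumption: every {placeholder} in the template must also be listed in
# input_variables, and the "stuff" chain requires one named 'context'.
template = """Context: {context}
Question: {question}
"""
prompt = PromptTemplate(template=template, input_variables=['context', 'question'])
print(prompt.format(context='retrieved text would go here', question='a sample question'))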
These are the two problems I’m currently facing. Any help would be greatly appreciated. I’m adding my entire code below for convenience.
My code:
import streamlit as st
from langchain_core.prompts import PromptTemplate
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.llms.ctransformers import CTransformers
from langchain.chains.retrieval_qa.base import RetrievalQA
DB_FAISS_PATH = "vectorstoresdb"
custom_prompt_template = """You are a helpful, respectful and honest assistant.
Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.
Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct.
If you don't know the answer to a question, please don't share false information.
Context: {}
Question: {question}
"""
def set_custom_prompt():
    # Build the prompt object from the template above.
    prompt = PromptTemplate(template=custom_prompt_template, input_variables=['', 'question'])
    return prompt
def load_llm():
    # Load the quantized Llama 2 chat model locally via CTransformers.
    llm = CTransformers(model="llama-2-7b-chat.ggmlv3.q4_0.bin", model_type="llama", max_new_tokens=512, temperature=0.7)
    return llm
def retrieval_qa_chain(llm, prompt, db):
    # Build a RetrievalQA chain that stuffs the top-k retrieved chunks into the prompt.
    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=db.as_retriever(search_kwargs={'k': 2}),
        return_source_documents=True,
        chain_type_kwargs={'prompt': prompt}
    )
    return qa_chain
def qa_bot():
    # Load the embeddings, the FAISS index, the LLM and the prompt, then wire them together.
    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2', model_kwargs={'device': 'cpu'})
    db = FAISS.load_local(DB_FAISS_PATH, embeddings, allow_dangerous_deserialization=True)
    llm = load_llm()
    qa_prompt = set_custom_prompt()
    qa = retrieval_qa_chain(llm, qa_prompt, db)
    return qa
def final_result(query):
    qa_result = qa_bot()
    response = qa_result({'query': query})
    return response
# Create centered main title
st.title('QA Bot')
# Create a text input box for the user
prompt = st.text_input('Input your question here')
# If the user hits enter
if prompt:
    response = final_result(prompt)
    # Writing it out to the screen
    st.write(response)
    # Displaying the retrieved source documents
    with st.expander('Source Text'):
        st.write(response['source_documents'])
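For completeness, this is how I expect to render the answer once the chain actually runs. The key names are my assumption based on return_source_documents=True (I'd expect response['result'] to hold the answer and response['source_documents'] the retrieved chunks), not something I've confirmed:

import streamlit as st

# Hypothetical helper, assuming the chain returns a dict with
# 'result' and 'source_documents' keys (unverified assumption).
def show_response(response):
    st.write(response['result'])
    with st.expander('Source Text'):
        for doc in response['source_documents']:
            st.write(doc.page_content)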