I am trying to connect Ollama (llama2) with CrewAI, and I am getting "openai.NotFoundError: 404 page not found".
In .env I have the values below:
OPENAI_API_BASE=http://localhost:11434/v1
OPENAI_MODEL_NAME=llama2
OPENAI_API_KEY=NA
In my main file, I have the lines below to connect:
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(
    model="llama2",
    base_url="http://localhost:11434/v1"
)
I am trying to create a sample CrewAI setup that connects to a local LLM.
Note: I installed the local LLM (Ollama llama2) successfully, and I can run the model on its own without any issue.
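For reference, this is roughly how I check that the Ollama endpoint itself responds (a minimal sketch using the openai client directly against the same base URL and model name as in my .env; the prompt text is just a placeholder):

from openai import OpenAI

# Point the plain OpenAI client at Ollama's OpenAI-compatible endpoint
client = OpenAI(base_url="http://localhost:11434/v1", api_key="NA")

# A simple chat completion to confirm llama2 answers outside of CrewAI
resp = client.chat.completions.create(
    model="llama2",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(resp.choices[0].message.content)

Note that a call like this only exercises the chat completions endpoint, not the embeddings endpoint that the traceback below ends in.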
But while connecting through CrewAI I get the error.
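For context, this is roughly how the crew is wired up (a trimmed-down, illustrative sketch rather than my full crew.py; the role, goal, and task strings are placeholders, and memory=True is an assumption based on the short-term memory search in the traceback):

from crewai import Agent, Task, Crew

# Reuses the llm = ChatOpenAI(...) object created above
researcher = Agent(
    role="Researcher",
    goal="Answer a simple question",
    backstory="A test agent for the local llama2 setup",
    llm=llm,
)

task = Task(
    description="Say hello and introduce yourself.",
    expected_output="A short greeting",
    agent=researcher,
)

# memory=True is assumed; the traceback shows CrewAI's contextual/short-term memory being queried
crew = Crew(agents=[researcher], tasks=[task], memory=True)

result = crew.kickoff()
print(result)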
The detailed error message is below:
Traceback (most recent call last):
File "D:crew_aicrew.py", line 114, in <module>
result = crew.kickoff()
^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagescrewaicrew.py", line 252, in kickoff
result = self._run_sequential_process()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagescrewaicrew.py", line 293, in _run_sequential_process
output = task.execute(context=task_output)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagescrewaitask.py", line 173, in execute
result = self._execute(
^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagescrewaitask.py", line 182, in _execute
result = agent.execute_task(
^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagescrewaiagent.py", line 207, in execute_task
memory = contextual_memory.build_context_for_task(task, context)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagescrewaimemorycontextualcontextual_memory.py", line 22, in build_context_for_task
context.append(self._fetch_stm_context(query))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagescrewaimemorycontextualcontextual_memory.py", line 31, in _fetch_stm_context
stm_results = self.stm.search(query)
^^^^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagescrewaimemoryshort_termshort_term_memory.py", line 23, in search
return self.storage.search(query=query, score_threshold=score_threshold)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagescrewaimemorystoragerag_storage.py", line 90, in search
else self.app.search(query, limit)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagesembedchainembedchain.py", line 635, in search
return [{"context": c[0], "metadata": c[1]} for c in self.db.query(**params)]
^^^^^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagesembedchainvectordbchroma.py", line 220, in query
result = self.collection.query(
^^^^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packageschromadbapimodelsCollection.py", line 327, in query
valid_query_embeddings = self._embed(input=valid_query_texts)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packageschromadbapimodelsCollection.py", line 633, in _embed
return self._embedding_function(input=input)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packageschromadbapitypes.py", line 193, in __call__
result = call(self, input)
^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packageschromadbutilsembedding_functions.py", line 188, in __call__
embeddings = self._client.create(
^^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagesopenairesourcesembeddings.py", line 113, in create
return self._post(
^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagesopenai_base_client.py", line 1232, in post
return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagesopenai_base_client.py", line 921, in request
return self._request(
^^^^^^^^^^^^^^
File "D:crew_ai.my_crew_envLibsite-packagesopenai_base_client.py", line 1012, in _request
raise self._make_status_error_from_response(err.response) from None
openai.NotFoundError: 404 page not found