Inside a Docker container built from the following Dockerfile:
FROM ubuntu
# Install prerequisites
RUN apt-get update && apt-get install -y build-essential cmake gfortran libcurl4-openssl-dev libssl-dev libxml2-dev python3-dev python3-pip python3-venv
RUN pip install langchain langchain-core langchain-community langchain-experimental langchain-chroma langchain_ollama pandas --break-system-packages
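(Exact package versions may matter here; to see what actually got installed inside the container, a quick standard-library check like this can be run:)
import importlib.metadata as md

# Print the installed version of each relevant package (pip package names)
for pkg in ("langchain", "langchain-core", "langchain-community", "langchain-ollama", "ollama", "httpx"):
    try:
        print(pkg, md.version(pkg))
    except md.PackageNotFoundError:
        print(pkg, "not installed")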
If I try to connect to the LLM models on the host system using cURL, they run fine:
root@1fec10f8d40e:/# curl http://192.168.11.98:9000/api/generate -d '{
"model": "llama3.1:8b",
"prompt": "Test",
"stream": false
}'
{"model":"llama3.1:8b","created_at":"2024-08-04T03:49:46.282365097Z","response":"It looks like you want to test me. I'm happy to play along!nnHow would you like to proceed? Would you like to:nnA) Ask a simple questionnB) Provide a statement and ask for feedbacknC) Engage in a conversation on a specific topicnD) Something else (please specify)nnLet me know, and we can get started!","done":true,"done_reason":"stop","context":[128006,882,128007,271,2323,128009,128006,78191,128007,271,2181,5992,1093,499,1390,311,1296,757,13,358,2846,6380,311,1514,3235,2268,4438,1053,499,1093,311,10570,30,19418,499,1093,311,1473,32,8,21069,264,4382,3488,198,33,8,40665,264,5224,323,2610,369,11302,198,34,8,3365,425,304,264,10652,389,264,3230,8712,198,35,8,25681,775,320,31121,14158,696,10267,757,1440,11,323,584,649,636,3940,0],"total_duration":2073589200,"load_duration":55691013,"prompt_eval_count":11,"prompt_eval_duration":32157000,"eval_count":76,"eval_duration":1943850000}
But when I try to connect the same way using LangChain, only the embedding model works; the LLM fails. Code below:
from langchain_community.embeddings import OllamaEmbeddings
from langchain_ollama import OllamaLLM

## Embedding model -- this works
embeddings_model = OllamaEmbeddings(base_url="http://192.168.11.98:9000", model="nomic-embed-text:v1.5", num_ctx=4096)
embeddings_model.embed_query("Test")

## LLM model -- this fails with "Connection refused"
llm_model = OllamaLLM(base_url="http://192.168.11.98:9000", model="llama3.1:8b", num_ctx=2048)
llm_model.invoke("Test")
Error:
>>> from langchain_community.embeddings import OllamaEmbeddings
>>> from langchain_ollama import OllamaLLM
>>> embeddings_model = OllamaEmbeddings(base_url = "http://192.168.11.98:9000", model="nomic-embed-text:v1.5", num_ctx=4096)
>>> embeddings_model.embed_query("Test")
[0.8171377182006836, 0.7424322366714478, -3.6913845539093018, -0.5350275635719299, 1.98311185836792, -0.08007726818323135, 0.7974349856376648, -0.5946609377861023, 1.4877475500106812, -0.8044648766517639, 0.38856828212738037, 1.0630642175674438, 0.6806553602218628, -0.9530377984046936, -1.4606661796569824, -0.2956351637840271,
........,
-0.9512965083122253]
>>>
>>> ## LLM Model
>>> llm_model = OllamaLLM(base_url = "http://192.168.11.98:9000",model="llama3.1:8b",num_ctx = 2048)
>>> llm_model.invoke("Test")
Traceback (most recent call last):
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpx/_transports/default.py", line 69, in map_httpcore_exceptions
yield
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpx/_transports/default.py", line 233, in handle_request
resp = self._pool.handle_request(req)
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpcore/_sync/connection_pool.py", line 216, in handle_request
raise exc from None
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpcore/_sync/connection_pool.py", line 196, in handle_request
response = connection.handle_request(
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpcore/_sync/connection.py", line 99, in handle_request
raise exc
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpcore/_sync/connection.py", line 76, in handle_request
stream = self._connect(request)
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpcore/_sync/connection.py", line 122, in _connect
stream = self._network_backend.connect_tcp(**kwargs)
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpcore/_backends/sync.py", line 205, in connect_tcp
with map_exceptions(exc_map):
File "/usr/lib/python3.10/contextlib.py", line 153, in __exit__
self.gen.throw(typ, value, traceback)
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpcore/_exceptions.py", line 14, in map_exceptions
raise to_exc(exc) from exc
httpcore.ConnectError: [Errno 111] Connection refused
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/langchain_core/language_models/llms.py", line 346, in invoke
self.generate_prompt(
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/langchain_core/language_models/llms.py", line 703, in generate_prompt
return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs)
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/langchain_core/language_models/llms.py", line 882, in generate
output = self._generate_helper(
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/langchain_core/language_models/llms.py", line 740, in _generate_helper
raise e
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/langchain_core/language_models/llms.py", line 727, in _generate_helper
self._generate(
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/langchain_ollama/llms.py", line 268, in _generate
final_chunk = self._stream_with_aggregation(
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/langchain_ollama/llms.py", line 236, in _stream_with_aggregation
for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/langchain_ollama/llms.py", line 186, in _create_generate_stream
yield from ollama.generate(
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/ollama/_client.py", line 79, in _stream
with self._client.stream(method, url, **kwargs) as r:
File "/usr/lib/python3.10/contextlib.py", line 135, in __enter__
return next(self.gen)
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpx/_client.py", line 870, in stream
response = self.send(
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpx/_client.py", line 914, in send
response = self._send_handling_auth(
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpx/_client.py", line 942, in _send_handling_auth
response = self._send_handling_redirects(
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpx/_client.py", line 979, in _send_handling_redirects
response = self._send_single_request(request)
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpx/_client.py", line 1015, in _send_single_request
response = transport.handle_request(request)
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpx/_transports/default.py", line 232, in handle_request
with map_httpcore_exceptions():
File "/usr/lib/python3.10/contextlib.py", line 153, in __exit__
self.gen.throw(typ, value, traceback)
File "/root/.virtualenvs/aaveLLM/lib/python3.10/site-packages/httpx/_transports/default.py", line 86, in map_httpcore_exceptions
raise mapped_exc(message) from exc
httpx.ConnectError: [Errno 111] Connection refused
The issue doesn't appear when I connect using LangChain outside the Docker environment, nor inside Docker without LangChain; it only appears in the Docker + LangChain combination. Any ideas what's breaking here?
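One further isolation step that might help narrow this down (a sketch, assuming the ollama Python client that langchain_ollama wraps, per the traceback) is to call that client directly with an explicit host, bypassing LangChain entirely:
from ollama import Client

# Talk to the Ollama server directly, with the host passed explicitly.
# If this succeeds while OllamaLLM fails, base_url is presumably getting
# lost somewhere between langchain_ollama and the underlying client.
client = Client(host="http://192.168.11.98:9000")
resp = client.generate(model="llama3.1:8b", prompt="Test")
print(resp["response"])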