As a follow-up to the thread on using Ollama for with_structured_output() instead of OpenAI or Mistral: ollama_functions.py needs to import from langchain_core.tools, which in turn imports _set_config_context from langchain_core.runnables.config.
But I get an ImportError: "cannot import name '_set_config_context' from 'langchain_core.runnables.config'".
Is there some obvious Python import rule that could have been violated here?
Import overview:
File ~/dry_run/ollama_functions.py:35
33 from langchain_core.runnables.base import RunnableMap
34 from langchain_core.runnables.passthrough import RunnablePassthrough
---> 35 from langchain_core.tools import BaseTool
37 DEFAULT_SYSTEM_TEMPLATE = """You have access to the following tools:
38
39 {tools}
(...)
46 }}
47 """ # noqa: E501
49 DEFAULT_RESPONSE_FUNCTION = {
50 "name": "__conversational_response",
51 "description": (
(...)
63 },
64 }
File ~/.local/lib/python3.10/site-packages/langchain_core/tools.py:67
60 from langchain_core.retrievers import BaseRetriever
61 from langchain_core.runnables import (
62 Runnable,
63 RunnableConfig,
64 RunnableSerializable,
65 ensure_config,
66 )
---> 67 from langchain_core.runnables.config import (
68 _set_config_context,
69 patch_config,
70 run_in_executor,
71 )
72 from langchain_core.runnables.utils import accepts_context
75 class SchemaAnnotationError(TypeError):
I tested importing patch_config from the same config module without any problem. The _set_config_context function looks like this:
def _set_config_context(config: RunnableConfig) -> None:
    """Set the child runnable config + tracing context.

    Args:
        config (RunnableConfig): The config to set.
    """
    # Lazy import: langsmith is only pulled in when this function actually runs.
    from langsmith import (
        RunTree,  # type: ignore
        run_helpers,  # type: ignore
    )
    # Store the config for child runnables via the module-level context variable.
    var_child_runnable_config.set(config)
    # Guard for langsmith versions that may not provide from_runnable_config.
    if hasattr(RunTree, "from_runnable_config"):
        rt = RunTree.from_runnable_config(dict(config))
        tc = run_helpers.get_tracing_context()
        # NOTE(review): presumably registers the new run tree as the parent in
        # the tracing context — confirm against langsmith's run_helpers API.
        run_helpers._set_tracing_context({**tc, "parent": rt})