I'm instantiating a Python Elasticsearch client as follows:
<code>es = Elasticsearch(
    hosts=ELASTICSEARCH_URL,
    timeout=5,
    ignore_unavailable=True,
    # connection_retries=Retry(
    #     total=5,
    #     backoff_factor=1.1,
    #     status_forcelist=[429, 502, 503, 504],
    #     raise_on_status=False,
    # )
)
</code>
I want to introduce a delay between retries, ideally with a backoff factor, so that executing a search request is retried with increasing waits:
<code>es.search(index="abc", body={"query": {"match_all": {}}})
</code>
Any ideas how to achieve this?
You can use the tenacity library for this. See the tenacity docs (https://tenacity.readthedocs.io) for reference:
<code>from elasticsearch import Elasticsearch
from tenacity import retry, wait_exponential, stop_after_attempt, retry_if_exception_type
import logging

# Define a retry strategy with exponential backoff:
# wait 2^x * 1 seconds between attempts, starting at 4 seconds,
# capped at 10 seconds, and give up after 5 attempts.
@retry(
    retry=retry_if_exception_type(Exception),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    stop=stop_after_attempt(5)
)
def retry_func(es, index, body):
    return es.search(index=index, body=body)

# Your Elasticsearch client
es = Elasticsearch(
    hosts="http://localhost:9200",
    timeout=5,
    # cloud_id=ELASTIC_CLOUD_ID,  # <- use instead of hosts if you connect via Elastic Cloud
)

try:
    response = retry_func(es, "abc", {"query": {"match_all": {}}})
    print(response)
except Exception as e:
    logging.error(f"error message: {e}")
</code>
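One refinement: `retry_if_exception_type(Exception)` also retries errors that will never succeed, such as a malformed query. Narrowing the retried exceptions to transport-level failures avoids that. A minimal sketch, assuming elasticsearch-py 7.x, where these exception classes live in `elasticsearch.exceptions`:
<code>from elasticsearch.exceptions import ConnectionError, ConnectionTimeout
from tenacity import retry, wait_exponential, stop_after_attempt, retry_if_exception_type

# Retry only transport-level failures; a malformed query fails immediately
# instead of being retried five times.
@retry(
    retry=retry_if_exception_type((ConnectionError, ConnectionTimeout)),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    stop=stop_after_attempt(5)
)
def retry_func(es, index, body):
    return es.search(index=index, body=body)
</code>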
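For completeness: the Elasticsearch client itself accepts `max_retries` and `retry_on_timeout` arguments, but as far as I know it retries straight away on another node, without a configurable delay or backoff between attempts, which is why wrapping the call with tenacity is needed here. A rough sketch, assuming `ELASTICSEARCH_URL` is defined as in your question:
<code>from elasticsearch import Elasticsearch

# Built-in retries: failed requests are retried on other nodes in the pool,
# but without any sleep/backoff between attempts.
es = Elasticsearch(
    hosts=ELASTICSEARCH_URL,
    timeout=5,
    max_retries=5,
    retry_on_timeout=True,  # also retry requests that time out
)
</code>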