From 1a688d76e6fec54a4aa4a5939cc71288762dcd74 Mon Sep 17 00:00:00 2001
From: Yue Fei <59813791+moria97@users.noreply.github.com>
Date: Tue, 9 Apr 2024 09:29:31 +0800
Subject: [PATCH] Add retry for batch runner (#12647)

---
 .../llama_index/core/evaluation/batch_runner.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/llama-index-core/llama_index/core/evaluation/batch_runner.py b/llama-index-core/llama_index/core/evaluation/batch_runner.py
index e7ce75b5c4..53a7d72a88 100644
--- a/llama-index-core/llama_index/core/evaluation/batch_runner.py
+++ b/llama-index-core/llama_index/core/evaluation/batch_runner.py
@@ -1,4 +1,5 @@
 import asyncio
+from tenacity import retry, stop_after_attempt, wait_exponential
 from typing import Any, Dict, List, Optional, Sequence, Tuple, cast
 
 from llama_index.core.async_utils import asyncio_module
@@ -7,6 +8,11 @@ from llama_index.core.base.response.schema import RESPONSE_TYPE, Response
 from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
 
 
+@retry(
+    reraise=True,
+    stop=stop_after_attempt(3),
+    wait=wait_exponential(multiplier=1, min=4, max=10),
+)
 async def eval_response_worker(
     semaphore: asyncio.Semaphore,
     evaluator: BaseEvaluator,
@@ -26,6 +32,11 @@ async def eval_response_worker(
     )
 
 
+@retry(
+    reraise=True,
+    stop=stop_after_attempt(3),
+    wait=wait_exponential(multiplier=1, min=4, max=10),
+)
 async def eval_worker(
     semaphore: asyncio.Semaphore,
     evaluator: BaseEvaluator,
@@ -46,6 +57,11 @@ async def eval_worker(
     )
 
 
+@retry(
+    reraise=True,
+    stop=stop_after_attempt(3),
+    wait=wait_exponential(multiplier=1, min=4, max=10),
+)
 async def response_worker(
     semaphore: asyncio.Semaphore,
     query_engine: BaseQueryEngine,
-- 
GitLab
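
For context only (this is not part of the patch): a minimal, self-contained sketch of how the tenacity decorator applied above behaves when it wraps an async worker. The `flaky_evaluation` coroutine and its `attempts` counter are hypothetical stand-ins for the patched `eval_response_worker` / `eval_worker` / `response_worker` functions; the decorator arguments match the ones added in the diff.

    # Standalone illustration, assuming the same retry policy as the patch.
    import asyncio

    from tenacity import retry, stop_after_attempt, wait_exponential

    attempts = 0


    @retry(
        reraise=True,                                        # after the last attempt, re-raise the original exception
        stop=stop_after_attempt(3),                          # give up after 3 attempts
        wait=wait_exponential(multiplier=1, min=4, max=10),  # exponential backoff, bounded to 4-10 seconds
    )
    async def flaky_evaluation() -> str:
        """Hypothetical stand-in for a worker that fails transiently (e.g. an LLM/API error)."""
        global attempts
        attempts += 1
        if attempts < 3:
            raise RuntimeError("transient failure")
        return "ok"


    if __name__ == "__main__":
        # The first two calls raise; the third succeeds, so this prints "ok".
        print(asyncio.run(flaky_evaluation()))

Because `reraise=True` is set, callers of the batch runner still see the underlying exception (rather than a tenacity `RetryError`) if all three attempts fail, so existing error handling around the workers should be unaffected.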