From 789e77277de9f4c42f679be8a4d14cb0fe6685f0 Mon Sep 17 00:00:00 2001
From: James Briggs <james.briggs@hotmail.com>
Date: Wed, 17 Apr 2024 20:05:50 +0800
Subject: [PATCH] fix: improve exp backoff for openai encoder; lower splitter oversize log to info

---
 semantic_router/encoders/openai.py          | 2 +-
 semantic_router/splitters/rolling_window.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/semantic_router/encoders/openai.py b/semantic_router/encoders/openai.py
index de6c0599..7712b19b 100644
--- a/semantic_router/encoders/openai.py
+++ b/semantic_router/encoders/openai.py
@@ -52,7 +52,7 @@ class OpenAIEncoder(BaseEncoder):
         error_message = ""
 
         # Exponential backoff
-        for j in range(3):
+        for j in range(1, 7):
             try:
                 embeds = self.client.embeddings.create(
                     input=docs,
diff --git a/semantic_router/splitters/rolling_window.py b/semantic_router/splitters/rolling_window.py
index b3ca6279..a2809ff5 100644
--- a/semantic_router/splitters/rolling_window.py
+++ b/semantic_router/splitters/rolling_window.py
@@ -76,7 +76,7 @@ class RollingWindowSplitter(BaseSplitter):
         if len(docs) == 1:
             token_count = tiktoken_length(docs[0])
             if token_count > self.max_split_tokens:
-                logger.warning(
+                logger.info(
                     f"Single document exceeds the maximum token limit "
                     f"of {self.max_split_tokens}. "
                     "Splitting to sentences before semantically splitting."
-- 
GitLab