diff --git a/semantic_router/encoders/openai.py b/semantic_router/encoders/openai.py
index de6c0599710b59a14124ae269eee3b047eb33063..7712b19b7298b5d5a3070b79b131775ad3466349 100644
--- a/semantic_router/encoders/openai.py
+++ b/semantic_router/encoders/openai.py
@@ -52,7 +52,7 @@ class OpenAIEncoder(BaseEncoder):
         error_message = ""
 
         # Exponential backoff
-        for j in range(3):
+        for j in range(1, 7):
             try:
                 embeds = self.client.embeddings.create(
                     input=docs,
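
Note on the first hunk: the hunk shows only the retry loop header, not the wait between attempts. Below is a minimal standalone sketch of the backoff pattern, assuming the loop body sleeps 2 ** j seconds after a failed call (that sleep is not visible in the hunk); under that assumption, range(1, 7) gives six attempts waiting 2, 4, 8, 16, 32, 64 seconds, versus three attempts waiting 1, 2, 4 seconds with the old range(3). The helper name embed_with_backoff and the caught OpenAIError are illustrative, not taken from the patch.

import time

from openai import OpenAIError  # base exception in openai>=1.0; the exact exception caught is not shown in the hunk


def embed_with_backoff(create_fn, docs):
    """Standalone sketch of the retry loop this hunk widens."""
    error_message = ""
    for j in range(1, 7):  # six attempts instead of the previous three
        try:
            return create_fn(input=docs)
        except OpenAIError as e:
            time.sleep(2 ** j)  # assumed exponential wait: 2, 4, ..., 64 seconds
            error_message = str(e)
    raise ValueError(f"OpenAI API call failed after retries: {error_message}")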
diff --git a/semantic_router/splitters/rolling_window.py b/semantic_router/splitters/rolling_window.py
index b3ca6279e6733bf5afa0a57dbefa64c3379fda35..a2809ff53cf15d4a00a87307da6027491d1008ab 100644
--- a/semantic_router/splitters/rolling_window.py
+++ b/semantic_router/splitters/rolling_window.py
@@ -76,7 +76,7 @@ class RollingWindowSplitter(BaseSplitter):
         if len(docs) == 1:
             token_count = tiktoken_length(docs[0])
             if token_count > self.max_split_tokens:
-                logger.warning(
+                logger.info(
                     f"Single document exceeds the maximum token limit "
                     f"of {self.max_split_tokens}. "
                     "Splitting to sentences before semantically splitting."