diff --git a/tests/unit/encoders/test_azure.py b/tests/unit/encoders/test_azure.py
index 3969543e8fb28572923db185ef49f78cb724c080..5a6841d7edf5038aae4a9a7124331a760279b626 100644
--- a/tests/unit/encoders/test_azure.py
+++ b/tests/unit/encoders/test_azure.py
@@ -1,4 +1,5 @@
 import pytest
+from unittest.mock import AsyncMock, Mock, patch
 from openai import OpenAIError
 from openai.types import CreateEmbeddingResponse, Embedding
 from openai.types.create_embedding_response import Usage
@@ -7,14 +8,26 @@ from semantic_router.encoders import AzureOpenAIEncoder
 
 
 @pytest.fixture
-def openai_encoder(mocker):
-    mocker.patch("openai.Client")
+def mock_openai_client():
+    with patch("openai.AzureOpenAI") as mock_client:
+        yield mock_client
+
+
+@pytest.fixture
+def mock_openai_async_client():
+    with patch("openai.AsyncAzureOpenAI") as mock_async_client:
+        yield mock_async_client
+
+
+@pytest.fixture
+def openai_encoder(mock_openai_client, mock_openai_async_client):
     return AzureOpenAIEncoder(
         api_key="test_api_key",
         deployment_name="test-deployment",
         azure_endpoint="test_endpoint",
         api_version="test_version",
         model="test_model",
+        max_retries=2,
     )
 
 
@@ -70,21 +83,10 @@ class TestAzureOpenAIEncoder:
         mocker.patch.object(
             openai_encoder.client.embeddings, "create", side_effect=responses
         )
-        embeddings = openai_encoder(["test document"])
+        with patch("semantic_router.encoders.zure.sleep", return_value=None):
+            embeddings = openai_encoder(["test document"])
         assert embeddings == [[0.1, 0.2]]
 
-    def test_openai_encoder_call_with_retries(self, openai_encoder, mocker):
-        mocker.patch("os.getenv", return_value="fake-api-key")
-        mocker.patch("time.sleep", return_value=None)  # To speed up the test
-        mocker.patch.object(
-            openai_encoder.client.embeddings,
-            "create",
-            side_effect=OpenAIError("Test error"),
-        )
-        with pytest.raises(ValueError) as e:
-            openai_encoder(["test document"])
-        assert "No embeddings returned." in str(e.value)
-
     def test_openai_encoder_call_failure_non_openai_error(self, openai_encoder, mocker):
         mocker.patch("os.getenv", return_value="fake-api-key")
         mocker.patch("time.sleep", return_value=None)  # To speed up the test
@@ -93,8 +95,9 @@ class TestAzureOpenAIEncoder:
             "create",
             side_effect=Exception("Non-OpenAIError"),
         )
-        with pytest.raises(ValueError) as e:
-            openai_encoder(["test document"])
+        with patch("semantic_router.encoders.zure.sleep", return_value=None):
+            with pytest.raises(ValueError) as e:
+                openai_encoder(["test document"])
 
         assert "OpenAI API call failed. Error: Non-OpenAIError" in str(e.value)
 
@@ -120,5 +123,128 @@ class TestAzureOpenAIEncoder:
         mocker.patch.object(
             openai_encoder.client.embeddings, "create", side_effect=responses
         )
-        embeddings = openai_encoder(["test document"])
+        with patch("semantic_router.encoders.zure.sleep", return_value=None):
+            embeddings = openai_encoder(["test document"])
         assert embeddings == [[0.1, 0.2]]
+
+    def test_retry_logic_sync(self, openai_encoder, mock_openai_client, mocker):
+        # Mock the embeddings.create method to raise an error twice, then succeed
+        mock_create = Mock(
+            side_effect=[
+                OpenAIError("API error"),
+                OpenAIError("API error"),
+                CreateEmbeddingResponse(
+                    data=[
+                        Embedding(
+                            embedding=[0.1, 0.2, 0.3], index=0, object="embedding"
+                        )
+                    ],
+                    model="text-embedding-3-small",
+                    object="list",
+                    usage={"prompt_tokens": 5, "total_tokens": 5},
+                ),
+            ]
+        )
+        mock_openai_client.return_value.embeddings.create = mock_create
+        mocker.patch("time.sleep", return_value=None)  # To speed up the test
+
+        # Patch the sleep function in the encoder module to avoid actual sleep
+        with patch("semantic_router.encoders.zure.sleep", return_value=None):
+            result = openai_encoder(["test document"])
+
+        assert result == [[0.1, 0.2, 0.3]]
+        assert mock_create.call_count == 3
+
+    def test_no_retry_on_max_retries_zero(self, openai_encoder, mock_openai_client):
+        openai_encoder.max_retries = 0
+        # Mock the embeddings.create method to always raise an error
+        mock_create = Mock(side_effect=OpenAIError("API error"))
+        mock_openai_client.return_value.embeddings.create = mock_create
+
+        with pytest.raises(OpenAIError):
+            openai_encoder(["test document"])
+
+        assert mock_create.call_count == 1  # Only the initial attempt, no retries
+
+    def test_retry_logic_sync_max_retries_exceeded(
+        self, openai_encoder, mock_openai_client, mocker
+    ):
+        # Mock the embeddings.create method to always raise an error
+        mock_create = Mock(side_effect=OpenAIError("API error"))
+        mock_openai_client.return_value.embeddings.create = mock_create
+        mocker.patch("time.sleep", return_value=None)  # To speed up the test
+
+        # Patch the sleep function in the encoder module to avoid actual sleep
+        with patch("semantic_router.encoders.zure.sleep", return_value=None):
+            with pytest.raises(OpenAIError):
+                openai_encoder(["test document"])
+
+        assert mock_create.call_count == 3  # Initial attempt + 2 retries
+
+    @pytest.mark.asyncio
+    async def test_retry_logic_async(
+        self, openai_encoder, mock_openai_async_client, mocker
+    ):
+        # Set up the mock to fail twice, then succeed
+        mock_create = AsyncMock(
+            side_effect=[
+                OpenAIError("API error"),
+                OpenAIError("API error"),
+                CreateEmbeddingResponse(
+                    data=[
+                        Embedding(
+                            embedding=[0.1, 0.2, 0.3], index=0, object="embedding"
+                        )
+                    ],
+                    model="text-embedding-3-small",
+                    object="list",
+                    usage={"prompt_tokens": 5, "total_tokens": 5},
+                ),
+            ]
+        )
+        mock_openai_async_client.return_value.embeddings.create = mock_create
+        mocker.patch("asyncio.sleep", return_value=None)  # To speed up the test
+
+        # Patch the asleep function in the encoder module to avoid actual sleep
+        with patch("semantic_router.encoders.zure.asleep", return_value=None):
+            result = await openai_encoder.acall(["test document"])
+
+        assert result == [[0.1, 0.2, 0.3]]
+        assert mock_create.call_count == 3
+
+    @pytest.mark.asyncio
+    async def test_retry_logic_async_max_retries_exceeded(
+        self, openai_encoder, mock_openai_async_client, mocker
+    ):
+        # Mock the embeddings.create method to always raise an error
+        async def raise_error(*args, **kwargs):
+            raise OpenAIError("API error")
+
+        mock_create = AsyncMock(side_effect=raise_error)
+        mock_openai_async_client.return_value.embeddings.create = mock_create
+        mocker.patch("asyncio.sleep", return_value=None)  # To speed up the test
+
+        # Patch the asleep function in the encoder module to avoid actual sleep
+        with patch("semantic_router.encoders.zure.asleep", return_value=None):
+            with pytest.raises(OpenAIError):
+                await openai_encoder.acall(["test document"])
+
+        assert mock_create.call_count == 3  # Initial attempt + 2 retries
+
+    @pytest.mark.asyncio
+    async def test_no_retry_on_max_retries_zero_async(
+        self, openai_encoder, mock_openai_async_client
+    ):
+        openai_encoder.max_retries = 0
+
+        # Mock the embeddings.create method to always raise an error
+        async def raise_error(*args, **kwargs):
+            raise OpenAIError("API error")
+
+        mock_create = AsyncMock(side_effect=raise_error)
+        mock_openai_async_client.return_value.embeddings.create = mock_create
+
+        with pytest.raises(OpenAIError):
+            await openai_encoder.acall(["test document"])
+
+        assert mock_create.call_count == 1  # Only the initial attempt, no retries
diff --git a/tests/unit/encoders/test_openai.py b/tests/unit/encoders/test_openai.py
index 56ef2b38e5082848279624bedfbc9417ec610d6b..538e969250051c3da305702a42944903d1cb71cf 100644
--- a/tests/unit/encoders/test_openai.py
+++ b/tests/unit/encoders/test_openai.py
@@ -1,4 +1,5 @@
 import pytest
+from unittest.mock import AsyncMock, Mock, patch
 from openai import OpenAIError
 from openai.types import CreateEmbeddingResponse, Embedding
 from openai.types.create_embedding_response import Usage
@@ -7,9 +8,20 @@ from semantic_router.encoders import OpenAIEncoder
 
 
 @pytest.fixture
-def openai_encoder(mocker):
-    mocker.patch("openai.Client")
-    return OpenAIEncoder(openai_api_key="test_api_key")
+def mock_openai_client():
+    with patch("openai.Client") as mock_client:
+        yield mock_client
+
+
+@pytest.fixture
+def mock_openai_async_client():
+    with patch("openai.AsyncClient") as mock_async_client:
+        yield mock_async_client
+
+
+@pytest.fixture
+def openai_encoder(mock_openai_client, mock_openai_async_client):
+    return OpenAIEncoder(openai_api_key="fake_key", max_retries=2)
 
 
 class TestOpenAIEncoder:
@@ -64,21 +76,10 @@ class TestOpenAIEncoder:
         mocker.patch.object(
             openai_encoder.client.embeddings, "create", side_effect=responses
         )
-        embeddings = openai_encoder(["test document"])
+        with patch("semantic_router.encoders.openai.sleep", return_value=None):
+            embeddings = openai_encoder(["test document"])
         assert embeddings == [[0.1, 0.2]]
 
-    def test_openai_encoder_call_with_retries(self, openai_encoder, mocker):
-        mocker.patch("os.getenv", return_value="fake-api-key")
-        mocker.patch("time.sleep", return_value=None)  # To speed up the test
-        mocker.patch.object(
-            openai_encoder.client.embeddings,
-            "create",
-            side_effect=OpenAIError("Test error"),
-        )
-        with pytest.raises(ValueError) as e:
-            openai_encoder(["test document"])
-        assert "No embeddings returned." in str(e.value)
-
     def test_openai_encoder_call_failure_non_openai_error(self, openai_encoder, mocker):
         mocker.patch("os.getenv", return_value="fake-api-key")
         mocker.patch("time.sleep", return_value=None)  # To speed up the test
@@ -87,8 +88,9 @@ class TestOpenAIEncoder:
             "create",
             side_effect=Exception("Non-OpenAIError"),
         )
-        with pytest.raises(ValueError) as e:
-            openai_encoder(["test document"])
+        with patch("semantic_router.encoders.openai.sleep", return_value=None):
+            with pytest.raises(ValueError) as e:
+                openai_encoder(["test document"])
 
         assert "OpenAI API call failed. Error: Non-OpenAIError" in str(e.value)
 
@@ -114,5 +116,128 @@ class TestOpenAIEncoder:
         mocker.patch.object(
             openai_encoder.client.embeddings, "create", side_effect=responses
         )
-        embeddings = openai_encoder(["test document"])
+        with patch("semantic_router.encoders.openai.sleep", return_value=None):
+            embeddings = openai_encoder(["test document"])
         assert embeddings == [[0.1, 0.2]]
+
+    def test_retry_logic_sync(self, openai_encoder, mock_openai_client, mocker):
+        # Mock the embeddings.create method to raise an error twice, then succeed
+        mock_create = Mock(
+            side_effect=[
+                OpenAIError("API error"),
+                OpenAIError("API error"),
+                CreateEmbeddingResponse(
+                    data=[
+                        Embedding(
+                            embedding=[0.1, 0.2, 0.3], index=0, object="embedding"
+                        )
+                    ],
+                    model="text-embedding-3-small",
+                    object="list",
+                    usage={"prompt_tokens": 5, "total_tokens": 5},
+                ),
+            ]
+        )
+        mock_openai_client.return_value.embeddings.create = mock_create
+        mocker.patch("time.sleep", return_value=None)  # To speed up the test
+
+        # Patch the sleep function in the encoder module to avoid actual sleep
+        with patch("semantic_router.encoders.openai.sleep", return_value=None):
+            result = openai_encoder(["test document"])
+
+        assert result == [[0.1, 0.2, 0.3]]
+        assert mock_create.call_count == 3
+
+    def test_no_retry_on_max_retries_zero(self, openai_encoder, mock_openai_client):
+        openai_encoder.max_retries = 0
+        # Mock the embeddings.create method to always raise an error
+        mock_create = Mock(side_effect=OpenAIError("API error"))
+        mock_openai_client.return_value.embeddings.create = mock_create
+
+        with pytest.raises(OpenAIError):
+            openai_encoder(["test document"])
+
+        assert mock_create.call_count == 1  # Only the initial attempt, no retries
+
+    def test_retry_logic_sync_max_retries_exceeded(
+        self, openai_encoder, mock_openai_client, mocker
+    ):
+        # Mock the embeddings.create method to always raise an error
+        mock_create = Mock(side_effect=OpenAIError("API error"))
+        mock_openai_client.return_value.embeddings.create = mock_create
+        mocker.patch("time.sleep", return_value=None)  # To speed up the test
+
+        # Patch the sleep function in the encoder module to avoid actual sleep
+        with patch("semantic_router.encoders.openai.sleep", return_value=None):
+            with pytest.raises(OpenAIError):
+                openai_encoder(["test document"])
+
+        assert mock_create.call_count == 3  # Initial attempt + 2 retries
+
+    @pytest.mark.asyncio
+    async def test_retry_logic_async(
+        self, openai_encoder, mock_openai_async_client, mocker
+    ):
+        # Set up the mock to fail twice, then succeed
+        mock_create = AsyncMock(
+            side_effect=[
+                OpenAIError("API error"),
+                OpenAIError("API error"),
+                CreateEmbeddingResponse(
+                    data=[
+                        Embedding(
+                            embedding=[0.1, 0.2, 0.3], index=0, object="embedding"
+                        )
+                    ],
+                    model="text-embedding-3-small",
+                    object="list",
+                    usage={"prompt_tokens": 5, "total_tokens": 5},
+                ),
+            ]
+        )
+        mock_openai_async_client.return_value.embeddings.create = mock_create
+        mocker.patch("asyncio.sleep", return_value=None)  # To speed up the test
+
+        # Patch the asleep function in the encoder module to avoid actual sleep
+        with patch("semantic_router.encoders.openai.asleep", return_value=None):
+            result = await openai_encoder.acall(["test document"])
+
+        assert result == [[0.1, 0.2, 0.3]]
+        assert mock_create.call_count == 3
+
+    @pytest.mark.asyncio
+    async def test_retry_logic_async_max_retries_exceeded(
+        self, openai_encoder, mock_openai_async_client, mocker
+    ):
+        # Mock the embeddings.create method to always raise an error
+        async def raise_error(*args, **kwargs):
+            raise OpenAIError("API error")
+
+        mock_create = AsyncMock(side_effect=raise_error)
+        mock_openai_async_client.return_value.embeddings.create = mock_create
+        mocker.patch("asyncio.sleep", return_value=None)  # To speed up the test
+
+        # Patch the asleep function in the encoder module to avoid actual sleep
+        with patch("semantic_router.encoders.openai.asleep", return_value=None):
+            with pytest.raises(OpenAIError):
+                await openai_encoder.acall(["test document"])
+
+        assert mock_create.call_count == 3  # Initial attempt + 2 retries
+
+    @pytest.mark.asyncio
+    async def test_no_retry_on_max_retries_zero_async(
+        self, openai_encoder, mock_openai_async_client
+    ):
+        openai_encoder.max_retries = 0
+
+        # Mock the embeddings.create method to always raise an error
+        async def raise_error(*args, **kwargs):
+            raise OpenAIError("API error")
+
+        mock_create = AsyncMock(side_effect=raise_error)
+        mock_openai_async_client.return_value.embeddings.create = mock_create
+
+        with pytest.raises(OpenAIError):
+            await openai_encoder.acall(["test document"])
+
+        assert mock_create.call_count == 1  # Only the initial attempt, no retries