diff --git a/semantic_router/layer.py b/semantic_router/layer.py
index ab0e0bf57275078f687dae5f52b729dc401e9de2..3b4560da9d3d8fb7751fd99f854a839d235d8c23 100644
--- a/semantic_router/layer.py
+++ b/semantic_router/layer.py
@@ -7,7 +7,7 @@ import yaml
 
 from semantic_router.encoders import BaseEncoder, OpenAIEncoder
 from semantic_router.linear import similarity_matrix, top_scores
-from semantic_router.llms import AzureOpenAILLM, BaseLLM, OpenAILLM
+from semantic_router.llms import BaseLLM, OpenAILLM
 from semantic_router.route import Route
 from semantic_router.schema import Encoder, EncoderType, RouteChoice
 from semantic_router.utils.logger import logger
diff --git a/semantic_router/route.py b/semantic_router/route.py
index b463fde2377eb579bd058c956de043ca55df9df1..e0accca0188663c9dff35bf07ff6008dfc776893 100644
--- a/semantic_router/route.py
+++ b/semantic_router/route.py
@@ -57,7 +57,6 @@ class Route(BaseModel):
             extracted_inputs = self.llm.extract_function_inputs(
                 query=query, function_schema=self.function_schema
             )
-            logger.info(f"extracted inputs {extracted_inputs}")
             func_call = extracted_inputs
         else:
             # otherwise we just pass None for the call
diff --git a/tests/unit/llms/test_llm_openai.py b/tests/unit/llms/test_llm_openai.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f1171db84177d65b1bd2f438d531cadc8522edf
--- /dev/null
+++ b/tests/unit/llms/test_llm_openai.py
@@ -0,0 +1,56 @@
+import pytest
+
+from semantic_router.llms import OpenAILLM
+from semantic_router.schema import Message
+
+
+@pytest.fixture
+def openai_llm(mocker):
+    mocker.patch("openai.OpenAI")
+    return OpenAILLM(openai_api_key="test_api_key")
+
+
+class TestOpenAILLM:
+    def test_openai_llm_init_with_api_key(self, openai_llm):
+        assert openai_llm.client is not None, "Client should be initialized"
+        assert openai_llm.name == "gpt-3.5-turbo", "Default name not set correctly"
+
+    def test_openai_llm_init_success(self, mocker):
+        mocker.patch("os.getenv", return_value="fake-api-key")
+        llm = OpenAILLM()
+        assert llm.client is not None
+
+    def test_openai_llm_init_without_api_key(self, mocker):
+        mocker.patch("os.getenv", return_value=None)
+        with pytest.raises(ValueError) as _:
+            OpenAILLM()
+
+    def test_openai_llm_call_uninitialized_client(self, openai_llm):
+        # Set the client to None to simulate an uninitialized client
+        openai_llm.client = None
+        with pytest.raises(ValueError) as e:
+            llm_input = [Message(role="user", content="test")]
+            openai_llm(llm_input)
+        assert "OpenAI client is not initialized." in str(e.value)
+
+    def test_openai_llm_init_exception(self, mocker):
+        mocker.patch("os.getenv", return_value="fake-api-key")
+        mocker.patch("openai.OpenAI", side_effect=Exception("Initialization error"))
+        with pytest.raises(ValueError) as e:
+            OpenAILLM()
+        assert (
+            "OpenAI API client failed to initialize. Error: Initialization error"
+            in str(e.value)
+        )
+
+    def test_openai_llm_call_success(self, openai_llm, mocker):
+        mock_completion = mocker.MagicMock()
+        mock_completion.choices[0].message.content = "test"
+
+        mocker.patch("os.getenv", return_value="fake-api-key")
+        mocker.patch.object(
+            openai_llm.client.chat.completions, "create", return_value=mock_completion
+        )
+        llm_input = [Message(role="user", content="test")]
+        output = openai_llm(llm_input)
+        assert output == "test"