diff --git a/.gitignore b/.gitignore
index cc461499d2d720e715c86c33e513b9c259cb5737..c45ff835e404fccfbe848d948eee5ea327bf69c0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,5 +25,5 @@ output
 node_modules
 package-lock.json
 package.json
-
+test.ipynb
 ```
diff --git a/poetry.lock b/poetry.lock
index 4ff76b724aca26d398a26a3aae78d32699282589..42adb1617152c1dd105f9f721f38b49fd733c64c 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -246,12 +246,10 @@ files = [
 
 [package.dependencies]
 click = ">=8.0.0"
-ipython = {version = ">=7.8.0", optional = true, markers = "extra == \"jupyter\""}
 mypy-extensions = ">=0.4.3"
 packaging = ">=22.0"
 pathspec = ">=0.9.0"
 platformdirs = ">=2"
-tokenize-rt = {version = ">=3.2.0", optional = true, markers = "extra == \"jupyter\""}
 tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
 typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""}
 
@@ -2837,17 +2835,6 @@ files = [
 [package.dependencies]
 mpmath = ">=0.19"
 
-[[package]]
-name = "tokenize-rt"
-version = "5.2.0"
-description = "A wrapper around the stdlib `tokenize` which roundtrips."
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "tokenize_rt-5.2.0-py2.py3-none-any.whl", hash = "sha256:b79d41a65cfec71285433511b50271b05da3584a1da144a0752e9c621a285289"},
-    {file = "tokenize_rt-5.2.0.tar.gz", hash = "sha256:9fe80f8a5c1edad2d3ede0f37481cc0cc1538a2f442c9c2f9e4feacd2792d054"},
-]
-
 [[package]]
 name = "tokenizers"
 version = "0.15.0"
@@ -3374,4 +3361,4 @@ local = ["llama-cpp-python", "torch", "transformers"]
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.9"
-content-hash = "6e658c338fd236a82fd0a9f03a64393b2bd728802e069b0663a5ff786bd203dc"
+content-hash = "20a39eaeaa617613fd7e13038ac4a206cc23e6faf9d5e077bf035bc3243bb5e7"
diff --git a/pyproject.toml b/pyproject.toml
index 84acdcb76ecea3c67dcccdd9846e60d0e59e2e15..7d2ed027657be97497ea020b2d12d8e7a318a267 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -27,6 +27,7 @@ fastembed = {version = "^0.1.3", optional = true, python = "<3.12"}
 torch = {version = "^2.1.2", optional = true}
 transformers = {version = "^4.36.2", optional = true}
 llama-cpp-python = {version = "^0.2.28", optional = true}
+black = "^23.12.1"
 
 [tool.poetry.extras]
 hybrid = ["pinecone-text"]
@@ -36,7 +37,6 @@ local = ["torch", "transformers", "llama-cpp-python"]
 [tool.poetry.group.dev.dependencies]
 ipykernel = "^6.25.0"
 ruff = "^0.1.5"
-black = {extras = ["jupyter"], version = "^23.12.0"}
 pytest = "^7.4.3"
 pytest-mock = "^3.12.0"
 pytest-cov = "^4.1.0"
diff --git a/semantic_router/layer.py b/semantic_router/layer.py
index 15b3fc0d2700529a0daee273aab43df2af03c7dc..3b4560da9d3d8fb7751fd99f854a839d235d8c23 100644
--- a/semantic_router/layer.py
+++ b/semantic_router/layer.py
@@ -194,10 +194,12 @@ class RouteLayer:
                         "default. Ensure API key is set in OPENAI_API_KEY environment "
                         "variable."
                     )
+
                     self.llm = OpenAILLM()
                     route.llm = self.llm
                 else:
                     route.llm = self.llm
+            logger.info(f"LLM  `{route.llm}` is chosen")
             return route(text)
         else:
             # if no route passes threshold, return empty route choice
diff --git a/semantic_router/llms/__init__.py b/semantic_router/llms/__init__.py
index e5aedc85fd30cc0b576fc2170c1b7ca694bdf200..b216f0b0e62fd8bcdf0339ca0b79dbcc910e0c1e 100644
--- a/semantic_router/llms/__init__.py
+++ b/semantic_router/llms/__init__.py
@@ -2,5 +2,6 @@ from semantic_router.llms.base import BaseLLM
 from semantic_router.llms.cohere import CohereLLM
 from semantic_router.llms.openai import OpenAILLM
 from semantic_router.llms.openrouter import OpenRouterLLM
+from semantic_router.llms.zure import AzureOpenAILLM
 
-__all__ = ["BaseLLM", "OpenAILLM", "OpenRouterLLM", "CohereLLM"]
+__all__ = ["BaseLLM", "OpenAILLM", "OpenRouterLLM", "CohereLLM", "AzureOpenAILLM"]
diff --git a/semantic_router/llms/base.py b/semantic_router/llms/base.py
index 43195543dc6bd6c516500af62a2276e9152720b1..1c02c78b65f3dce3babf0ee1cbb6d2a03e780b7b 100644
--- a/semantic_router/llms/base.py
+++ b/semantic_router/llms/base.py
@@ -31,7 +31,6 @@ class BaseLLM(BaseModel):
             param_types = [
                 info.split(":")[1].strip().split("=")[0].strip() for info in param_info
             ]
-
             for name, type_str in zip(param_names, param_types):
                 if name not in inputs:
                     logger.error(f"Input {name} missing from query")
@@ -76,12 +75,14 @@ class BaseLLM(BaseModel):
         """
         llm_input = [Message(role="user", content=prompt)]
         output = self(llm_input)
+
         if not output:
             raise Exception("No output generated for extract function input")
 
         output = output.replace("'", '"').strip().rstrip(",")
-
+        logger.info(f"LLM output: {output}")
         function_inputs = json.loads(output)
+        logger.info(f"Function inputs: {function_inputs}")
         if not self._is_valid_inputs(function_inputs, function_schema):
             raise ValueError("Invalid inputs")
         return function_inputs
diff --git a/semantic_router/llms/zure.py b/semantic_router/llms/zure.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2dff87a7c932fcdeef25f86cda7a0c7d6f2883e
--- /dev/null
+++ b/semantic_router/llms/zure.py
@@ -0,0 +1,66 @@
+import os
+from typing import List, Optional
+
+import openai
+
+from semantic_router.llms import BaseLLM
+from semantic_router.schema import Message
+from semantic_router.utils.logger import logger
+
+
+class AzureOpenAILLM(BaseLLM):
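+    """LLM wrapper around the Azure OpenAI chat completions API.
+
+    Falls back to the AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT
+    environment variables when credentials are not passed explicitly.
+    """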
+    client: Optional[openai.AzureOpenAI]
+    temperature: Optional[float]
+    max_tokens: Optional[int]
+
+    def __init__(
+        self,
+        name: Optional[str] = None,
+        openai_api_key: Optional[str] = None,
+        azure_endpoint: Optional[str] = None,
+        temperature: float = 0.01,
+        max_tokens: int = 200,
+        api_version: str = "2023-07-01-preview",
+    ):
+        if name is None:
+            name = os.getenv("OPENAI_CHAT_MODEL_NAME", "gpt-3.5-turbo")
+        super().__init__(name=name)
+        api_key = openai_api_key or os.getenv("AZURE_OPENAI_API_KEY")
+        if api_key is None:
+            raise ValueError("AzureOpenAI API key cannot be 'None'.")
+        azure_endpoint = azure_endpoint or os.getenv("AZURE_OPENAI_ENDPOINT")
+        if azure_endpoint is None:
+            raise ValueError("Azure endpoint API key cannot be 'None'.")
+        try:
+            self.client = openai.AzureOpenAI(
+                api_key=api_key, azure_endpoint=azure_endpoint, api_version=api_version
+            )
+        except Exception as e:
+            raise ValueError(f"AzureOpenAI API client failed to initialize. Error: {e}")
+        self.temperature = temperature
+        self.max_tokens = max_tokens
+
+    def __call__(self, messages: List[Message]) -> str:
+        if self.client is None:
+            raise ValueError("AzureOpenAI client is not initialized.")
+        try:
+            completion = self.client.chat.completions.create(
+                model=self.name,
+                messages=[m.to_openai() for m in messages],
+                temperature=self.temperature,
+                max_tokens=self.max_tokens,
+            )
+
+            output = completion.choices[0].message.content
+
+            if not output:
+                raise Exception("No output generated")
+            return output
+        except Exception as e:
+            logger.error(f"LLM error: {e}")
+            raise Exception(f"LLM error: {e}")
diff --git a/semantic_router/route.py b/semantic_router/route.py
index 112f60fd4332c31d78ca2a4f5dd342a1306b3a24..e06c4bf7b52a75e16310ee27101600ea137b7111 100644
--- a/semantic_router/route.py
+++ b/semantic_router/route.py
@@ -46,6 +46,7 @@ class Route(BaseModel):
     llm: Optional[BaseLLM] = None
 
     def __call__(self, query: str) -> RouteChoice:
+        logger.info(f"this is the llm passed to route object {self.llm}")
         if self.function_schema:
             if not self.llm:
                 raise ValueError(
@@ -96,29 +97,29 @@ class Route(BaseModel):
         logger.info("Generating dynamic route...")
 
         prompt = f"""
-You are tasked to generate a JSON configuration based on the provided
-function schema. Please follow the template below, no other tokens allowed:
-
-<config>
-{{
-    "name": "<function_name>",
-    "utterances": [
-        "<example_utterance_1>",
-        "<example_utterance_2>",
-        "<example_utterance_3>",
-        "<example_utterance_4>",
-        "<example_utterance_5>"]
-}}
-</config>
-
-Only include the "name" and "utterances" keys in your answer.
-The "name" should match the function name and the "utterances"
-should comprise a list of 5 example phrases that could be used to invoke
-the function. Use real values instead of placeholders.
-
-Input schema:
-{function_schema}
-"""
+        You are tasked to generate a JSON configuration based on the provided
+        function schema. Please follow the template below, no other tokens allowed:
+
+        <config>
+        {{
+            "name": "<function_name>",
+            "utterances": [
+                "<example_utterance_1>",
+                "<example_utterance_2>",
+                "<example_utterance_3>",
+                "<example_utterance_4>",
+                "<example_utterance_5>"]
+        }}
+        </config>
+
+        Only include the "name" and "utterances" keys in your answer.
+        The "name" should match the function name and the "utterances"
+        should comprise a list of 5 example phrases that could be used to invoke
+        the function. Use real values instead of placeholders.
+
+        Input schema:
+        {function_schema}
+        """
 
         llm_input = [Message(role="user", content=prompt)]
         output = llm(llm_input)
diff --git a/tests/unit/llms/test_llm_azure_openai.py b/tests/unit/llms/test_llm_azure_openai.py
new file mode 100644
index 0000000000000000000000000000000000000000..a50b08fb85eca26719da69db41b065d4ceef9e86
--- /dev/null
+++ b/tests/unit/llms/test_llm_azure_openai.py
@@ -0,0 +1,88 @@
+import pytest
+
+from semantic_router.llms import AzureOpenAILLM
+from semantic_router.schema import Message
+
+
+@pytest.fixture
+def azure_openai_llm(mocker):
+    mocker.patch("openai.AzureOpenAI")
+    return AzureOpenAILLM(openai_api_key="test_api_key", azure_endpoint="test_endpoint")
+
+
+class TestAzureOpenAILLM:
+    def test_azure_openai_llm_init_with_api_key(self, azure_openai_llm):
+        assert azure_openai_llm.client is not None, "Client should be initialized"
+        assert (
+            azure_openai_llm.name == "gpt-3.5-turbo"
+        ), "Default name not set correctly"
+
+    def test_azure_openai_llm_init_success(self, mocker):
+        mocker.patch("os.getenv", return_value="fake-api-key")
+        llm = AzureOpenAILLM()
+        assert llm.client is not None
+
+    def test_azure_openai_llm_init_without_api_key(self, mocker):
+        mocker.patch("os.getenv", return_value=None)
+        with pytest.raises(ValueError) as _:
+            AzureOpenAILLM()
+
+    def test_azure_openai_llm_init_without_azure_endpoint(self, mocker):
+        mocker.patch(
+            "os.getenv",
+            side_effect=lambda key, default=None: {
+                "OPENAI_CHAT_MODEL_NAME": "test-model-name"
+            }.get(key, default),
+        )
+        with pytest.raises(ValueError) as e:
+            AzureOpenAILLM(openai_api_key="test_api_key")
+        assert "Azure endpoint API key cannot be 'None'" in str(e.value)
+
+    def test_azure_openai_llm_call_uninitialized_client(self, azure_openai_llm):
+        # Set the client to None to simulate an uninitialized client
+        azure_openai_llm.client = None
+        with pytest.raises(ValueError) as e:
+            llm_input = [Message(role="user", content="test")]
+            azure_openai_llm(llm_input)
+        assert "AzureOpenAI client is not initialized." in str(e.value)
+
+    def test_azure_openai_llm_init_exception(self, mocker):
+        mocker.patch("os.getenv", return_value="fake-api-key")
+        mocker.patch(
+            "openai.AzureOpenAI", side_effect=Exception("Initialization error")
+        )
+        with pytest.raises(ValueError) as e:
+            AzureOpenAILLM()
+        assert (
+            "AzureOpenAI API client failed to initialize. Error: Initialization error"
+            in str(e.value)
+        )
+
+    def test_azure_openai_llm_temperature_max_tokens_initialization(self):
+        test_temperature = 0.5
+        test_max_tokens = 100
+        azure_llm = AzureOpenAILLM(
+            openai_api_key="test_api_key",
+            azure_endpoint="test_endpoint",
+            temperature=test_temperature,
+            max_tokens=test_max_tokens,
+        )
+
+        assert (
+            azure_llm.temperature == test_temperature
+        ), "Temperature not set correctly"
+        assert azure_llm.max_tokens == test_max_tokens, "Max tokens not set correctly"
+
+    def test_azure_openai_llm_call_success(self, azure_openai_llm, mocker):
+        mock_completion = mocker.MagicMock()
+        mock_completion.choices[0].message.content = "test"
+
+        mocker.patch("os.getenv", return_value="fake-api-key")
+        mocker.patch.object(
+            azure_openai_llm.client.chat.completions,
+            "create",
+            return_value=mock_completion,
+        )
+        llm_input = [Message(role="user", content="test")]
+        output = azure_openai_llm(llm_input)
+        assert output == "test"