From 0ecadd21f98abc26ec8f3f95fbcc76f47b765fdf Mon Sep 17 00:00:00 2001
From: Arash Mosharraf <armoshar@microsoft.com>
Date: Thu, 18 Jan 2024 17:10:54 -0600
Subject: [PATCH] Run lint and formatter on Azure LLM fix

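Apply black formatting and import ordering across the package:
reorder imports in layer.py and llms/__init__.py, strip trailing
whitespace in llms/base.py and route.py, and reflow the
AzureOpenAILLM constructor call in llms/zure.py. Also adds black
as a dependency in pyproject.toml and regenerates poetry.lock.

After this patch the Azure client is constructed as shown below (a
minimal usage sketch, not part of the diff; the endpoint URL and API
key are placeholders):

    from semantic_router.llms import AzureOpenAILLM

    llm = AzureOpenAILLM(
        openai_api_key="<azure-openai-api-key>",
        azure_endpoint="https://<resource>.openai.azure.com/",
        api_version="2023-07-01-preview",
    )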
---
 poetry.lock                      |  3 ++-
 pyproject.toml                   |  1 +
 semantic_router/layer.py         |  2 +-
 semantic_router/llms/__init__.py | 11 +++++++++--
 semantic_router/llms/base.py     |  2 +-
 semantic_router/llms/zure.py     |  9 +++++----
 semantic_router/route.py         |  2 +-
 7 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index c57d0a3d..0b9f7a10 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2264,6 +2264,7 @@ files = [
     {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
     {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
     {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
+    {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
     {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
     {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
     {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@@ -3249,4 +3250,4 @@ local = ["llama-cpp-python", "torch", "transformers"]
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.9"
-content-hash = "1de69e2e5050507790405e09d4cd79fe114b4200a56c87cc609a104366696989"
+content-hash = "0543c088bc34e525b9d337b4378140e9910618b91364144e3a6a5f63f3daf222"
diff --git a/pyproject.toml b/pyproject.toml
index cd959d96..688d522d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -27,6 +27,7 @@ fastembed = {version = "^0.1.3", optional = true, python = "<3.12"}
 torch = {version = "^2.1.2", optional = true}
 transformers = {version = "^4.36.2", optional = true}
 llama-cpp-python = {version = "^0.2.28", optional = true}
+black = "^23.12.1"
 
 [tool.poetry.extras]
 hybrid = ["pinecone-text"]
diff --git a/semantic_router/layer.py b/semantic_router/layer.py
index 980726a4..ab0e0bf5 100644
--- a/semantic_router/layer.py
+++ b/semantic_router/layer.py
@@ -7,7 +7,7 @@ import yaml
 
 from semantic_router.encoders import BaseEncoder, OpenAIEncoder
 from semantic_router.linear import similarity_matrix, top_scores
-from semantic_router.llms import BaseLLM, OpenAILLM, AzureOpenAILLM
+from semantic_router.llms import AzureOpenAILLM, BaseLLM, OpenAILLM
 from semantic_router.route import Route
 from semantic_router.schema import Encoder, EncoderType, RouteChoice
 from semantic_router.utils.logger import logger
diff --git a/semantic_router/llms/__init__.py b/semantic_router/llms/__init__.py
index 217d441b..f2c66770 100644
--- a/semantic_router/llms/__init__.py
+++ b/semantic_router/llms/__init__.py
@@ -2,7 +2,14 @@ from semantic_router.llms.base import BaseLLM
 from semantic_router.llms.cohere import CohereLLM
 from semantic_router.llms.llamacpp import LlamaCppLLM
 from semantic_router.llms.openai import OpenAILLM
-from semantic_router.llms.zure import AzureOpenAILLM
 from semantic_router.llms.openrouter import OpenRouterLLM
+from semantic_router.llms.zure import AzureOpenAILLM
 
-__all__ = ["BaseLLM", "OpenAILLM", "OpenRouterLLM", "CohereLLM", "LlamaCppLLM", "AzureOpenAILLM"]
+__all__ = [
+    "BaseLLM",
+    "OpenAILLM",
+    "OpenRouterLLM",
+    "CohereLLM",
+    "LlamaCppLLM",
+    "AzureOpenAILLM",
+]
diff --git a/semantic_router/llms/base.py b/semantic_router/llms/base.py
index da5c6054..7addc1b2 100644
--- a/semantic_router/llms/base.py
+++ b/semantic_router/llms/base.py
@@ -75,7 +75,7 @@ class BaseLLM(BaseModel):
         """
         llm_input = [Message(role="user", content=prompt)]
         output = self(llm_input)
-        
+
         if not output:
             raise Exception("No output generated for extract function input")
 
diff --git a/semantic_router/llms/zure.py b/semantic_router/llms/zure.py
index 01b13adb..fa321523 100644
--- a/semantic_router/llms/zure.py
+++ b/semantic_router/llms/zure.py
@@ -17,10 +17,10 @@ class AzureOpenAILLM(BaseLLM):
         self,
         name: Optional[str] = None,
         openai_api_key: Optional[str] = None,
-        azure_endpoint:Optional[str] = None,
+        azure_endpoint: Optional[str] = None,
         temperature: float = 0.01,
         max_tokens: int = 200,
-        api_version="2023-07-01-preview"
+        api_version="2023-07-01-preview",
     ):
         if name is None:
             name = os.getenv("OPENAI_CHAT_MODEL_NAME", "gpt-35-turbo")
@@ -32,8 +32,9 @@ class AzureOpenAILLM(BaseLLM):
         if azure_endpoint is None:
             raise ValueError("Azure endpoint cannot be 'None'.")
         try:
-            self.client = openai.AzureOpenAI(api_key=api_key, azure_endpoint=azure_endpoint
-                                             , api_version=api_version)
+            self.client = openai.AzureOpenAI(
+                api_key=api_key, azure_endpoint=azure_endpoint, api_version=api_version
+            )
         except Exception as e:
             raise ValueError(f"OpenAI API client failed to initialize. Error: {e}")
         self.temperature = temperature
diff --git a/semantic_router/route.py b/semantic_router/route.py
index bcc3036d..b463fde2 100644
--- a/semantic_router/route.py
+++ b/semantic_router/route.py
@@ -44,7 +44,7 @@ class Route(BaseModel):
     description: Optional[str] = None
     function_schema: Optional[Dict[str, Any]] = None
     llm: Optional[BaseLLM] = None
-    
+
     def __call__(self, query: str) -> RouteChoice:
         logger.info(f"LLM passed to Route object: {self.llm}")
         if self.function_schema:
-- 
GitLab