From ce129a8dc01b82a83186db09a74ff6b38ad4fb06 Mon Sep 17 00:00:00 2001 From: Arash Mosharraf <armoshar@microsoft.com> Date: Sun, 21 Jan 2024 18:51:17 -0600 Subject: [PATCH] added unit test for AzureOpenAILLM --- semantic_router/layer.py | 2 +- semantic_router/route.py | 1 - tests/unit/llms/test_llm_azure_openai.py | 56 ++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 2 deletions(-) create mode 100644 tests/unit/llms/test_llm_azure_openai.py diff --git a/semantic_router/layer.py b/semantic_router/layer.py index ab0e0bf5..3b4560da 100644 --- a/semantic_router/layer.py +++ b/semantic_router/layer.py @@ -7,7 +7,7 @@ import yaml from semantic_router.encoders import BaseEncoder, OpenAIEncoder from semantic_router.linear import similarity_matrix, top_scores -from semantic_router.llms import AzureOpenAILLM, BaseLLM, OpenAILLM +from semantic_router.llms import BaseLLM, OpenAILLM from semantic_router.route import Route from semantic_router.schema import Encoder, EncoderType, RouteChoice from semantic_router.utils.logger import logger diff --git a/semantic_router/route.py b/semantic_router/route.py index b463fde2..e0accca0 100644 --- a/semantic_router/route.py +++ b/semantic_router/route.py @@ -57,7 +57,6 @@ class Route(BaseModel): extracted_inputs = self.llm.extract_function_inputs( query=query, function_schema=self.function_schema ) - logger.info(f"extracted inputs {extracted_inputs}") func_call = extracted_inputs else: # otherwise we just pass None for the call diff --git a/tests/unit/llms/test_llm_azure_openai.py b/tests/unit/llms/test_llm_azure_openai.py new file mode 100644 index 00000000..2f1171db --- /dev/null +++ b/tests/unit/llms/test_llm_azure_openai.py @@ -0,0 +1,56 @@ +import pytest + +from semantic_router.llms import OpenAILLM +from semantic_router.schema import Message + + +@pytest.fixture +def openai_llm(mocker): + mocker.patch("openai.Client") + return OpenAILLM(openai_api_key="test_api_key") + + +class TestOpenAILLM: + def 
test_openai_llm_init_with_api_key(self, openai_llm):
        # NOTE(review): this file is added as tests/unit/llms/test_llm_azure_openai.py
        # and the commit message says "added unit test for AzureOpenAILLM", but every
        # test below imports and exercises OpenAILLM — it looks like an unadapted copy
        # of test_llm_openai.py. Confirm whether AzureOpenAILLM coverage was intended.
        """The fixture-built LLM has a client and the default model name."""
        assert openai_llm.client is not None, "Client should be initialized"
        assert openai_llm.name == "gpt-3.5-turbo", "Default name not set correctly"

    def test_openai_llm_init_success(self, mocker):
        """Constructing without an explicit key succeeds when os.getenv yields one."""
        # Patching os.getenv globally makes the env-var lookup return a key.
        mocker.patch("os.getenv", return_value="fake-api-key")
        llm = OpenAILLM()
        assert llm.client is not None

    def test_openai_llm_init_without_api_key(self, mocker):
        """Constructing with no key anywhere raises ValueError."""
        # No explicit key and os.getenv returns None -> init must fail.
        mocker.patch("os.getenv", return_value=None)
        with pytest.raises(ValueError) as _:
            OpenAILLM()

    def test_openai_llm_call_uninitialized_client(self, openai_llm):
        """Calling the LLM with client=None raises a descriptive ValueError."""
        # Set the client to None to simulate an uninitialized client
        openai_llm.client = None
        with pytest.raises(ValueError) as e:
            llm_input = [Message(role="user", content="test")]
            openai_llm(llm_input)
        assert "OpenAI client is not initialized." in str(e.value)

    def test_openai_llm_init_exception(self, mocker):
        """A failure inside openai.OpenAI(...) is wrapped in ValueError."""
        mocker.patch("os.getenv", return_value="fake-api-key")
        # Make the underlying client constructor blow up to exercise the wrapper.
        mocker.patch("openai.OpenAI", side_effect=Exception("Initialization error"))
        with pytest.raises(ValueError) as e:
            OpenAILLM()
        assert (
            "OpenAI API client failed to initialize. Error: Initialization error"
            in str(e.value)
        )

    def test_openai_llm_call_success(self, openai_llm, mocker):
        """A successful chat completion returns the first choice's message content."""
        # Mock the completions response shape: .choices[0].message.content
        mock_completion = mocker.MagicMock()
        mock_completion.choices[0].message.content = "test"

        mocker.patch("os.getenv", return_value="fake-api-key")
        # Stub the network call so __call__ returns the canned completion.
        mocker.patch.object(
            openai_llm.client.chat.completions, "create", return_value=mock_completion
        )
        llm_input = [Message(role="user", content="test")]
        output = openai_llm(llm_input)
        assert output == "test"
-- GitLab