From 287126d579ef6724cf75d600c30673fd0b193412 Mon Sep 17 00:00:00 2001
From: Arash Mosharraf <armoshar@microsoft.com>
Date: Mon, 22 Jan 2024 10:22:45 -0600
Subject: [PATCH] fixed and added more tests

---
 tests/unit/llms/test_llm_azure_openai.py | 58 +++++++++++++++---------
 1 file changed, 36 insertions(+), 22 deletions(-)

diff --git a/tests/unit/llms/test_llm_azure_openai.py b/tests/unit/llms/test_llm_azure_openai.py
index 2f1171db..9d4f0b79 100644
--- a/tests/unit/llms/test_llm_azure_openai.py
+++ b/tests/unit/llms/test_llm_azure_openai.py
@@ -1,56 +1,70 @@
 import pytest
 
-from semantic_router.llms import OpenAILLM
+from semantic_router.llms import AzureOpenAILLM
 from semantic_router.schema import Message
 
 
 @pytest.fixture
-def openai_llm(mocker):
-    mocker.patch("openai.Client")
-    return OpenAILLM(openai_api_key="test_api_key")
+def azure_openai_llm(mocker):
+    mocker.patch("openai.AzureOpenAI")  # AzureOpenAILLM wraps openai.AzureOpenAI
+    return AzureOpenAILLM(openai_api_key="test_api_key")
 
 
 class TestOpenAILLM:
-    def test_openai_llm_init_with_api_key(self, openai_llm):
-        assert openai_llm.client is not None, "Client should be initialized"
-        assert openai_llm.name == "gpt-3.5-turbo", "Default name not set correctly"
+    def test_azure_openai_llm_init_with_api_key(self, azure_openai_llm):
+        assert azure_openai_llm.client is not None, "Client should be initialized"
+        assert azure_openai_llm.name == "gpt-3.5-turbo", "Default name not set correctly"
 
-    def test_openai_llm_init_success(self, mocker):
+    def test_azure_openai_llm_init_success(self, mocker):
         mocker.patch("os.getenv", return_value="fake-api-key")
-        llm = OpenAILLM()
+        llm = AzureOpenAILLM()
         assert llm.client is not None
 
-    def test_openai_llm_init_without_api_key(self, mocker):
+    def test_azure_openai_llm_init_without_api_key(self, mocker):
         mocker.patch("os.getenv", return_value=None)
         with pytest.raises(ValueError) as _:
-            OpenAILLM()
+            AzureOpenAILLM()
 
-    def test_openai_llm_call_uninitialized_client(self, openai_llm):
+    def test_azure_openai_llm_init_without_azure_endpoint(self, mocker):
+        mocker.patch("os.getenv", side_effect=[None, "fake-api-key"])  # Simulate missing Azure endpoint
+        with pytest.raises(ValueError) as e:
+            AzureOpenAILLM(openai_api_key="test_api_key")
+        assert "Azure endpoint API key cannot be 'None'" in str(e.value)
+
+    def test_azure_openai_llm_call_uninitialized_client(self, azure_openai_llm):
         # Set the client to None to simulate an uninitialized client
-        openai_llm.client = None
+        azure_openai_llm.client = None
         with pytest.raises(ValueError) as e:
             llm_input = [Message(role="user", content="test")]
-            openai_llm(llm_input)
-        assert "OpenAI client is not initialized." in str(e.value)
+            azure_openai_llm(llm_input)
+        assert "AzureOpenAI client is not initialized." in str(e.value)
 
-    def test_openai_llm_init_exception(self, mocker):
+    def test_azure_openai_llm_init_exception(self, mocker):
         mocker.patch("os.getenv", return_value="fake-api-key")
-        mocker.patch("openai.OpenAI", side_effect=Exception("Initialization error"))
+        mocker.patch("openai.AzureOpenAI", side_effect=Exception("Initialization error"))
         with pytest.raises(ValueError) as e:
-            OpenAILLM()
+            AzureOpenAILLM()
         assert (
-            "OpenAI API client failed to initialize. Error: Initialization error"
+            "AzureOpenAI API client failed to initialize. Error: Initialization error"
             in str(e.value)
         )
+
+    def test_azure_openai_llm_temperature_max_tokens_initialization(self):
+        test_temperature = 0.5
+        test_max_tokens = 100
+        azure_llm = AzureOpenAILLM(openai_api_key="test_api_key", temperature=test_temperature, max_tokens=test_max_tokens)
+
+        assert azure_llm.temperature == test_temperature, "Temperature not set correctly"
+        assert azure_llm.max_tokens == test_max_tokens, "Max tokens not set correctly"
 
-    def test_openai_llm_call_success(self, openai_llm, mocker):
+    def test_azure_openai_llm_call_success(self, azure_openai_llm, mocker):
         mock_completion = mocker.MagicMock()
         mock_completion.choices[0].message.content = "test"
 
         mocker.patch("os.getenv", return_value="fake-api-key")
         mocker.patch.object(
-            openai_llm.client.chat.completions, "create", return_value=mock_completion
+            azure_openai_llm.client.chat.completions, "create", return_value=mock_completion
         )
         llm_input = [Message(role="user", content="test")]
-        output = openai_llm(llm_input)
+        output = azure_openai_llm(llm_input)
         assert output == "test"
-- 
GitLab