From e65ec815b0dbc9e532ae9198030f77e24f70a00e Mon Sep 17 00:00:00 2001
From: Siraj R Aizlewood <siraj@aurelio.ai>
Date: Tue, 30 Apr 2024 04:40:49 +0400
Subject: [PATCH] Linting.

---
 tests/unit/llms/test_llm_openai.py | 23 ++++++++++++++++-------
 tests/unit/test_route.py           |  1 +
 2 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/tests/unit/llms/test_llm_openai.py b/tests/unit/llms/test_llm_openai.py
index 6c01b2bd..c8fdde4a 100644
--- a/tests/unit/llms/test_llm_openai.py
+++ b/tests/unit/llms/test_llm_openai.py
@@ -2,7 +2,10 @@ import pytest
 
 from semantic_router.llms import OpenAILLM
 from semantic_router.schema import Message
-from semantic_router.utils.function_call import get_schema_openai, convert_param_type_to_json_type
+from semantic_router.utils.function_call import (
+    get_schema_openai,
+    convert_param_type_to_json_type,
+)
 
 
 @pytest.fixture
@@ -168,7 +171,6 @@ class TestOpenAILLM:
             expected_error_message in actual_error_message
         ), f"Expected error message: '{expected_error_message}', but got: '{actual_error_message}'"
 
-
     def test_convert_param_type_to_json_type(self):
         # Test conversion of basic types
         assert convert_param_type_to_json_type("int") == "number"
@@ -185,15 +187,22 @@ class TestOpenAILLM:
         function_schema = {"function": "get_user_data", "args": ["user_id"]}
 
         # Mock the __call__ method to return a JSON string as expected
-        mocker.patch.object(OpenAILLM, '__call__', return_value='{"user_id": "123"}')
+        mocker.patch.object(OpenAILLM, "__call__", return_value='{"user_id": "123"}')
 
         result = openai_llm.extract_function_inputs(query, function_schema)
 
         # Ensure the __call__ method is called with the correct parameters
         expected_messages = [
-            Message(role="system", content="You are an intelligent AI. Given a command or request from the user, call the function to complete the request."),
-            Message(role="user", content=query)
+            Message(
+                role="system",
+                content="You are an intelligent AI. Given a command or request from the user, call the function to complete the request.",
+            ),
+            Message(role="user", content=query),
         ]
-        openai_llm.__call__.assert_called_once_with(messages=expected_messages, function_schema=function_schema)
+        openai_llm.__call__.assert_called_once_with(
+            messages=expected_messages, function_schema=function_schema
+        )
 
         # Check if the result is as expected
-        assert result == {"user_id": "123"}, "The function inputs should match the expected dictionary."
\ No newline at end of file
+        assert result == {
+            "user_id": "123"
+        }, "The function inputs should match the expected dictionary."
diff --git a/tests/unit/test_route.py b/tests/unit/test_route.py
index 5b91f9d3..3cedeadb 100644
--- a/tests/unit/test_route.py
+++ b/tests/unit/test_route.py
@@ -5,6 +5,7 @@ import pytest
 
 from semantic_router.llms import BaseLLM
 from semantic_router.route import Route, is_valid
+
 # Is valid test:
 def test_is_valid_with_valid_json():
     valid_json = '{"name": "test_route", "utterances": ["hello", "hi"]}'
-- 
GitLab