diff --git a/docs/02-dynamic-routes.ipynb b/docs/02-dynamic-routes.ipynb
index 99550d0aee790fd8044d5a903c8b44549f61176d..3dc13b8b88d014f3f3087efafe0cff4a3956bea4 100644
--- a/docs/02-dynamic-routes.ipynb
+++ b/docs/02-dynamic-routes.ipynb
@@ -86,18 +86,6 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "WARNING: Ignoring invalid distribution ~ (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-      "WARNING: Ignoring invalid distribution ~illow (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-      "WARNING: Ignoring invalid distribution ~rotobuf (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-      "WARNING: Ignoring invalid distribution ~ (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-      "WARNING: Ignoring invalid distribution ~illow (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-      "WARNING: Ignoring invalid distribution ~rotobuf (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-      "WARNING: Ignoring invalid distribution ~ (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-      "WARNING: Ignoring invalid distribution ~illow (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-      "WARNING: Ignoring invalid distribution ~rotobuf (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-      "WARNING: Ignoring invalid distribution ~ (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-      "WARNING: Ignoring invalid distribution ~illow (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
-      "WARNING: Ignoring invalid distribution ~rotobuf (C:\\Users\\Siraj\\Documents\\Personal\\Work\\Aurelio\\Virtual Environments\\semantic_router_3\\Lib\\site-packages)\n",
      "\n",
      "[notice] A new release of pip is available: 23.1.2 -> 24.0\n",
      "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
     ]
@@ -194,7 +182,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-07 15:12:16 INFO semantic_router.utils.logger local\u001b[0m\n"
+      "\u001b[32m2024-05-08 01:57:55 INFO semantic_router.utils.logger local\u001b[0m\n"
     ]
    }
   ],
@@ -313,7 +301,7 @@
    {
     "data": {
      "text/plain": [
-      "'07:12'"
+      "'17:57'"
     ]
    },
    "execution_count": 6,
@@ -430,7 +418,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-07 15:12:18 INFO semantic_router.utils.logger Adding `get_time` route\u001b[0m\n"
+      "\u001b[32m2024-05-08 01:57:56 INFO semantic_router.utils.logger Adding `get_time` route\u001b[0m\n"
     ]
    }
   ],
@@ -472,8 +460,8 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[33m2024-05-07 15:12:19 WARNING semantic_router.utils.logger No LLM provided for dynamic route, will use OpenAI LLM default. Ensure API key is set in OPENAI_API_KEY environment variable.\u001b[0m\n",
-      "\u001b[32m2024-05-07 15:12:20 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'America/New_York'}}]\u001b[0m\n"
+      "\u001b[33m2024-05-08 01:57:57 WARNING semantic_router.utils.logger No LLM provided for dynamic route, will use OpenAI LLM default. Ensure API key is set in OPENAI_API_KEY environment variable.\u001b[0m\n",
+      "\u001b[32m2024-05-08 01:57:58 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'America/New_York'}}]\u001b[0m\n"
     ]
    },
    {
@@ -518,7 +506,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "07:12\n"
+      "17:57\n"
     ]
    }
   ],
@@ -764,7 +752,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-07 15:12:20 INFO semantic_router.utils.logger local\u001b[0m\n"
+      "\u001b[32m2024-05-08 01:57:58 INFO semantic_router.utils.logger local\u001b[0m\n"
     ]
    }
   ],
@@ -872,8 +860,8 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[33m2024-05-07 15:12:22 WARNING semantic_router.utils.logger No LLM provided for dynamic route, will use OpenAI LLM default. Ensure API key is set in OPENAI_API_KEY environment variable.\u001b[0m\n",
-      "\u001b[32m2024-05-07 15:12:23 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'America/New_York'}}]\u001b[0m\n"
+      "\u001b[33m2024-05-08 01:58:00 WARNING semantic_router.utils.logger No LLM provided for dynamic route, will use OpenAI LLM default. Ensure API key is set in OPENAI_API_KEY environment variable.\u001b[0m\n",
+      "\u001b[32m2024-05-08 01:58:01 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'America/New_York'}}]\u001b[0m\n"
     ]
    },
    {
@@ -901,7 +889,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "07:12\n"
+      "17:58\n"
     ]
    }
   ],
@@ -925,7 +913,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-07 15:12:24 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time_difference', 'arguments': {'timezone1': 'America/Los_Angeles', 'timezone2': 'Europe/Istanbul'}}]\u001b[0m\n"
+      "\u001b[32m2024-05-08 01:58:02 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time_difference', 'arguments': {'timezone1': 'America/Los_Angeles', 'timezone2': 'Europe/Istanbul'}}]\u001b[0m\n"
     ]
    },
    {
@@ -977,7 +965,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-07 15:12:26 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'convert_time', 'arguments': {'time': '23:02', 'from_timezone': 'Asia/Dubai', 'to_timezone': 'Asia/Tokyo'}}]\u001b[0m\n"
+      "\u001b[32m2024-05-08 01:58:04 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'convert_time', 'arguments': {'time': '23:02', 'from_timezone': 'Asia/Dubai', 'to_timezone': 'Asia/Tokyo'}}]\u001b[0m\n"
     ]
    },
    {
@@ -1029,7 +1017,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "\u001b[32m2024-05-07 15:12:29 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'Europe/Prague'}}, {'function_name': 'get_time_difference', 'arguments': {'timezone1': 'Europe/Berlin', 'timezone2': 'Asia/Shanghai'}}, {'function_name': 'convert_time', 'arguments': {'time': '05:53', 'from_timezone': 'Europe/Lisbon', 'to_timezone': 'Asia/Bangkok'}}]\u001b[0m\n"
+      "\u001b[32m2024-05-08 01:58:07 INFO semantic_router.utils.logger Function inputs: [{'function_name': 'get_time', 'arguments': {'timezone': 'Europe/Prague'}}, {'function_name': 'get_time_difference', 'arguments': {'timezone1': 'Europe/Berlin', 'timezone2': 'Asia/Shanghai'}}, {'function_name': 'convert_time', 'arguments': {'time': '05:53', 'from_timezone': 'Europe/Lisbon', 'to_timezone': 'Asia/Bangkok'}}]\u001b[0m\n"
     ]
    }
   ],
@@ -1071,7 +1059,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "13:12\n",
+      "23:58\n",
      "The time difference between Europe/Berlin and Asia/Shanghai is 6.0 hours.\n",
      "11:53\n"
     ]
diff --git a/semantic_router/llms/base.py b/semantic_router/llms/base.py
index c8c1f14d0795066a01fe9172587fe939f4c90841..5ee56213d9b2482752f167b8a28895098ca41585 100644
--- a/semantic_router/llms/base.py
+++ b/semantic_router/llms/base.py
@@ -84,9 +84,9 @@ Your task is to output JSON representing the input arguments of a Python functio
 
 This is the Python function's schema:
 
-### FUNCTION_SCHEMA Start ###
+### FUNCTION_SCHEMAS Start ###
 {function_schemas}
-### FUNCTION_SCHEMA End ###
+### FUNCTION_SCHEMAS End ###
 
 This is the input query.
 
@@ -94,9 +94,9 @@
 {query}
 ### QUERY End ###
 
-The arguments that you need to provide values for, together with their datatypes, are stated in "signature" in the FUNCTION_SCHEMA.
+The arguments that you need to provide values for, together with their datatypes, are stated in "signature" in the FUNCTION_SCHEMAS.
 The values these arguments must take are made clear by the QUERY.
-Use the FUNCTION_SCHEMA "description" too, as this might provide helpful clues about the arguments and their values.
+Use the FUNCTION_SCHEMAS "description" too, as this might provide helpful clues about the arguments and their values.
 Return only JSON, stating the argument names and their corresponding values.
 
 ### FORMATTING_INSTRUCTIONS Start ###
diff --git a/semantic_router/llms/openai.py b/semantic_router/llms/openai.py
index 6e801298b764e86ad286b23eef2b6ae386a003bc..7df79da8b5163d6ecd2ee21b7032301a1f30b44d 100644
--- a/semantic_router/llms/openai.py
+++ b/semantic_router/llms/openai.py
@@ -96,7 +96,7 @@ class OpenAILLM(BaseLLM):
                 # Collecting multiple tool calls information
                 output = str(
                     self._extract_tool_calls_info(tool_calls)
-                )  # str in keepign with base type.
+                )  # str in keeping with base type.
             else:
                 content = completion.choices[0].message.content
                 if content is None:
diff --git a/tests/unit/llms/test_llm_base.py b/tests/unit/llms/test_llm_base.py
index 2208928a107575e8e5bc5306fc92b534713365f8..322435e9a77c998d23c64b2636ace7f0c4e9e6f1 100644
--- a/tests/unit/llms/test_llm_base.py
+++ b/tests/unit/llms/test_llm_base.py
@@ -16,15 +16,15 @@ class TestBaseLLM:
             base_llm("test")
 
     def test_base_llm_is_valid_inputs_valid_input_pass(self, base_llm):
-        test_schema = {
+        test_schemas = [{
            "name": "get_time",
            "description": 'Finds the current time in a specific timezone.\n\n:param timezone: The timezone to find the current time in, should\n be a valid timezone from the IANA Time Zone Database like\n "America/New_York" or "Europe/London". Do NOT put the place\n name itself like "rome", or "new york", you must provide\n the IANA format.\n:type timezone: str\n:return: The current time in the specified timezone.',
            "signature": "(timezone: str) -> str",
            "output": "<class 'str'>",
-        }
-        test_inputs = {"timezone": "America/New_York"}
+        }]
+        test_inputs = [{"timezone": "America/New_York"}]
 
-        assert base_llm._is_valid_inputs(test_inputs, test_schema) is True
+        assert base_llm._is_valid_inputs(test_inputs, test_schemas) is True
 
     @pytest.mark.skip(reason="TODO: bug in is_valid_inputs")
     def test_base_llm_is_valid_inputs_valid_input_fail(self, base_llm):
diff --git a/tests/unit/llms/test_llm_llamacpp.py b/tests/unit/llms/test_llm_llamacpp.py
index 9f579cdf44713c6eff6dd0c3a5ef8fbcba0e10b5..04a3ad3bc5528d8797ec326b444ba6af2db1c236 100644
--- a/tests/unit/llms/test_llm_llamacpp.py
+++ b/tests/unit/llms/test_llm_llamacpp.py
@@ -61,7 +61,7 @@ class TestLlamaCppLLM:
         test_query = "What time is it in America/New_York?"
 
         llamacpp_llm.extract_function_inputs(
-            query=test_query, function_schema=test_schema
+            query=test_query, function_schemas=[test_schema]
         )
 
     def test_llamacpp_extract_function_inputs_invalid(self, llamacpp_llm, mocker):
@@ -82,5 +82,5 @@ class TestLlamaCppLLM:
         test_query = "What time is it in America/New_York?"
 
         llamacpp_llm.extract_function_inputs(
-            query=test_query, function_schema=test_schema
+            query=test_query, function_schemas=[test_schema]
         )
diff --git a/tests/unit/llms/test_llm_openai.py b/tests/unit/llms/test_llm_openai.py
index 13217c8475d35e9107b170140ff64a8c29c1380e..bc014bed9e17545ca31877fa6cb9eff77cc7e89e 100644
--- a/tests/unit/llms/test_llm_openai.py
+++ b/tests/unit/llms/test_llm_openai.py
@@ -60,27 +60,29 @@ class TestOpenAILLM:
             """Sample function for testing."""
             return f"param1: {param1}, param2: {param2}"
 
-        expected_schema = {
-            "type": "function",
-            "function": {
-                "name": "sample_function",
-                "description": "Sample function for testing.",
-                "parameters": {
-                    "type": "object",
-                    "properties": {
-                        "param1": {
-                            "type": "number",
-                            "description": "No description available.",
-                        },
-                        "param2": {
-                            "type": "string",
-                            "description": "No description available.",
+        expected_schema = [
+            {
+                "type": "function",
+                "function": {
+                    "name": "sample_function",
+                    "description": "Sample function for testing.",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {
+                            "param1": {
+                                "type": "number",
+                                "description": "No description available.",
+                            },
+                            "param2": {
+                                "type": "string",
+                                "description": "No description available.",
+                            },
                         },
+                        "required": ["param1"],
                     },
-                    "required": ["param1"],
                 },
-            },
-        }
+            }
+        ]
 
         schema = get_schemas_openai([sample_function])
         assert schema == expected_schema, "Schema did not match expected output."
@@ -89,19 +91,38 @@ class TestOpenAILLM:
         with pytest.raises(ValueError):
             get_schemas_openai([non_callable])
 
+    # def test_openai_llm_call_with_function_schema(self, openai_llm, mocker):
+    #     mock_completion = mocker.MagicMock()
+    #     mock_completion.choices[0].message.tool_calls = [
+    #         mocker.MagicMock(function=mocker.MagicMock(arguments="result"))
+    #     ]
+    #     mocker.patch.object(
+    #         openai_llm.client.chat.completions, "create", return_value=mock_completion
+    #     )
+    #     llm_input = [Message(role="user", content="test")]
+    #     function_schemas = [{"type": "function", "name": "sample_function"}]
+    #     output = openai_llm(llm_input, function_schemas)
+    #     assert (
+    #         output == "result"
+    #     ), "Output did not match expected result with function schema"
+
     def test_openai_llm_call_with_function_schema(self, openai_llm, mocker):
+        # Mocking the tool call with valid JSON arguments and setting the function name explicitly
+        mock_function = mocker.MagicMock(arguments='{"timezone":"America/New_York"}')
+        mock_function.name = "sample_function"  # Set the function name explicitly here
+        mock_tool_call = mocker.MagicMock(function=mock_function)
         mock_completion = mocker.MagicMock()
-        mock_completion.choices[0].message.tool_calls = [
-            mocker.MagicMock(function=mocker.MagicMock(arguments="result"))
-        ]
+        mock_completion.choices[0].message.tool_calls = [mock_tool_call]
+
         mocker.patch.object(
             openai_llm.client.chat.completions, "create", return_value=mock_completion
         )
+
         llm_input = [Message(role="user", content="test")]
         function_schemas = [{"type": "function", "name": "sample_function"}]
         output = openai_llm(llm_input, function_schemas)
         assert (
-            output == "result"
+            output == "[{'function_name': 'sample_function', 'arguments': {'timezone': 'America/New_York'}}]"
         ), "Output did not match expected result with function schema"
 
     def test_openai_llm_call_with_invalid_tool_calls(self, openai_llm, mocker):
@@ -137,42 +158,38 @@ class TestOpenAILLM:
             openai_llm(llm_input, function_schemas)
 
         expected_error_message = (
-            "LLM error: Invalid output, expected arguments to be specified."
+            "LLM error: Invalid output, expected arguments to be specified for each tool call."
         )
         actual_error_message = str(exc_info.value)
         assert (
             expected_error_message in actual_error_message
         ), f"Expected error message: '{expected_error_message}', but got: '{actual_error_message}'"
 
-    def test_openai_llm_call_with_multiple_tools_specified(self, openai_llm, mocker):
-        mock_completion = mocker.MagicMock()
-        mock_completion.choices[0].message.tool_calls = [
-            mocker.MagicMock(),
-            mocker.MagicMock(),
-        ]
-        mocker.patch.object(
-            openai_llm.client.chat.completions, "create", return_value=mock_completion
-        )
-        llm_input = [Message(role="user", content="test")]
-        function_schemas = [{"type": "function", "name": "sample_function"}]
-
-        with pytest.raises(Exception) as exc_info:
-            openai_llm(llm_input, function_schemas)
-
-        expected_error_message = (
-            "LLM error: Invalid output, expected a single tool to be specified."
-        )
-        actual_error_message = str(exc_info.value)
-        assert (
-            expected_error_message in actual_error_message
-        ), f"Expected error message: '{expected_error_message}', but got: '{actual_error_message}'"
 
     def test_extract_function_inputs(self, openai_llm, mocker):
         query = "fetch user data"
-        function_schemas = [{"function": "get_user_data", "args": ["user_id"]}]
+        function_schemas = [
+            {
+                "type": "function",
+                "function": {
+                    "name": "get_user_data",
+                    "description": "Function to fetch user data.",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {
+                            "user_id": {
+                                "type": "string",
+                                "description": "The ID of the user."
+                            }
+                        },
+                        "required": ["user_id"]
+                    }
+                }
+            }
+        ]
 
         # Mock the __call__ method to return a JSON string as expected
-        mocker.patch.object(OpenAILLM, "__call__", return_value='{"user_id": "123"}')
+        mocker.patch.object(OpenAILLM, "__call__", return_value='[{"function_name": "get_user_data", "arguments": {"user_id": "123"}}]')
 
         result = openai_llm.extract_function_inputs(query, function_schemas)
 
         # Ensure the __call__ method is called with the correct parameters
@@ -188,6 +205,4 @@
         )
 
         # Check if the result is as expected
-        assert result == {
-            "user_id": "123"
-        }, "The function inputs should match the expected dictionary."
+        assert result == [{"function_name": "get_user_data", "arguments": {"user_id": "123"}}], "The function inputs should match the expected dictionary."
\ No newline at end of file
diff --git a/tests/unit/test_route.py b/tests/unit/test_route.py
index 3cedeadb998fa6632e87672752b9f6da9ddfe4c6..59050dbb4f1941372ee38074775ee911bd3637f0 100644
--- a/tests/unit/test_route.py
+++ b/tests/unit/test_route.py
@@ -63,12 +63,12 @@ class MockLLM(BaseLLM):
 
 class TestRoute:
     def test_value_error_in_route_call(self):
-        function_schema = {"name": "test_function", "type": "function"}
+        function_schemas = [{"name": "test_function", "type": "function"}]
 
         route = Route(
             name="test_function",
             utterances=["utterance1", "utterance2"],
-            function_schema=function_schema,
+            function_schemas=function_schemas,
         )
 
         with pytest.raises(ValueError):
@@ -76,9 +76,9 @@
 
     def test_generate_dynamic_route(self):
         mock_llm = MockLLM(name="test")
-        function_schema = {"name": "test_function", "type": "function"}
+        function_schemas = {"name": "test_function", "type": "function"}
         route = Route._generate_dynamic_route(
-            llm=mock_llm, function_schema=function_schema
+            llm=mock_llm, function_schemas=function_schemas, route_name="test_route"
         )
         assert route.name == "test_function"
         assert route.utterances == [
@@ -107,8 +107,8 @@
    #     }
    #     </config>
    #     """
-    #     function_schema = {"name": "test_function", "type": "function"}
-    #     route = await Route._generate_dynamic_route(function_schema)
+    #     function_schemas = [{"name": "test_function", "type": "function"}]
+    #     route = await Route._generate_dynamic_route(function_schemas)
    #     assert route.name == "test_function"
    #     assert route.utterances == [
    #         "example_utterance_1",
@@ -124,7 +124,7 @@
             "name": "test",
             "utterances": ["utterance"],
             "description": None,
-            "function_schema": None,
+            "function_schemas": None,
             "llm": None,
             "score_threshold": None,
         }
@@ -144,7 +144,7 @@
             """Test function docstring"""
             pass
 
-        dynamic_route = Route.from_dynamic_route(llm=mock_llm, entity=test_function)
+        dynamic_route = Route.from_dynamic_route(llm=mock_llm, entities=[test_function], route_name="test_route")
 
         assert dynamic_route.name == "test_function"
         assert dynamic_route.utterances == [
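
For orientation only (not part of the patch), here is a minimal, hedged sketch of the plural function_schemas API that the hunks above migrate to. The get_time helper, the hand-written schema dict, and the example query are illustrative assumptions; the keyword names, the Route constructor usage, and the returned [{'function_name': ..., 'arguments': ...}] shape are taken from the tests and notebook logs in this diff, and an OPENAI_API_KEY is assumed to be set as the notebook warning notes.

# Sketch: exercising the renamed, list-based function_schemas arguments.
from datetime import datetime
from zoneinfo import ZoneInfo

from semantic_router import Route
from semantic_router.llms.openai import OpenAILLM


def get_time(timezone: str) -> str:
    """Return the current time in an IANA timezone such as "America/New_York"."""
    return datetime.now(ZoneInfo(timezone)).strftime("%H:%M")


# Schemas are now always passed as a list, even for a single function.
schemas = [
    {
        "type": "function",
        "function": {
            "name": "get_time",
            "description": "Finds the current time in a specific timezone.",
            "parameters": {
                "type": "object",
                "properties": {
                    "timezone": {
                        "type": "string",
                        "description": "IANA timezone name.",
                    }
                },
                "required": ["timezone"],
            },
        },
    }
]

time_route = Route(
    name="get_time",
    utterances=["what time is it in new york?"],
    function_schemas=schemas,  # plural keyword, as in tests/unit/test_route.py
)

llm = OpenAILLM()  # assumes OPENAI_API_KEY is set in the environment
# Expected to return a list of tool calls, e.g.
# [{'function_name': 'get_time', 'arguments': {'timezone': 'America/New_York'}}]
inputs = llm.extract_function_inputs(
    query="What time is it in New York?", function_schemas=schemas
)
print(inputs)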