Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
Semantic Router
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Iterations
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Locked files
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Package registry
Container registry
Model registry
Operate
Environments
Terraform modules
Monitor
Incidents
Service Desk
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Code review analytics
Issue analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Terms and privacy
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
mirrored_repos
MachineLearning
aurelio-labs
Semantic Router
Commits
518a1a18
Unverified
Commit
518a1a18
authored
1 year ago
by
Siraj R Aizlewood
Browse files
Options
Downloads
Patches
Plain Diff
Fixing PyTests.
parent
2bcdd51b
No related branches found
No related tags found
No related merge requests found
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
tests/unit/llms/test_llm_openai.py
+5
-20
5 additions, 20 deletions
tests/unit/llms/test_llm_openai.py
with
5 additions
and
20 deletions
tests/unit/llms/test_llm_openai.py
+
5
−
20
View file @
518a1a18
import
pytest
from
semantic_router.llms
import
OpenAILLM
from
semantic_router.llms
.openai
import
OpenAILLM
,
get_schemas_openai
from
semantic_router.schema
import
Message
from
semantic_router.utils.function_call
import
(
get_schema_openai
,
convert_param_type_to_json_type
,
)
@pytest.fixture
def
openai_llm
(
mocker
):
...
...
@@ -59,7 +54,7 @@ class TestOpenAILLM:
output
=
openai_llm
(
llm_input
)
assert
output
==
"
test
"
def
test_get_schema_openai_with_valid_callable
(
self
):
def
test_get_schema
s
_openai_with_valid_callable
(
self
):
def
sample_function
(
param1
:
int
,
param2
:
str
=
"
default
"
)
->
str
:
"""
Sample function for testing.
"""
return
f
"
param1:
{
param1
}
, param2:
{
param2
}
"
...
...
@@ -85,13 +80,13 @@ class TestOpenAILLM:
},
},
}
schema
=
get_schema_openai
(
sample_function
)
schema
=
get_schema
s
_openai
(
[
sample_function
]
)
assert
schema
==
expected_schema
,
"
Schema did not match expected output.
"
def test_get_schemas_openai_with_non_callable(self):
    """Non-callable entries must be rejected.

    `get_schemas_openai` expects a list of callables; passing a plain
    string inside the list should raise a ValueError rather than
    silently producing a schema.
    """
    not_a_function = "I am not a function"
    with pytest.raises(ValueError):
        get_schemas_openai([not_a_function])
def
test_openai_llm_call_with_function_schema
(
self
,
openai_llm
,
mocker
):
mock_completion
=
mocker
.
MagicMock
()
...
...
@@ -171,16 +166,6 @@ class TestOpenAILLM:
expected_error_message
in
actual_error_message
),
f
"
Expected error message:
'
{
expected_error_message
}
'
, but got:
'
{
actual_error_message
}
'"
def test_convert_param_type_to_json_type(self):
    """Check the Python-type-name -> JSON-schema-type mapping.

    Covers the basic types plus one type ("dict") that is not
    explicitly handled and should fall back to "object".
    """
    # Test conversion of basic types, then a type not explicitly handled.
    cases = {
        "int": "number",
        "float": "number",
        "str": "string",
        "bool": "boolean",
        "NoneType": "null",
        "list": "array",
        "dict": "object",
    }
    for param_type, json_type in cases.items():
        assert convert_param_type_to_json_type(param_type) == json_type
def
test_extract_function_inputs
(
self
,
openai_llm
,
mocker
):
query
=
"
fetch user data
"
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment