From 11f39e25fe78a6fdbb705af22c4e52beb7c2698d Mon Sep 17 00:00:00 2001
From: Siraj R Aizlewood <siraj@aurelio.ai>
Date: Wed, 21 Feb 2024 17:39:39 +0400
Subject: [PATCH] Initial Pytests for ollama.py.

---
 tests/unit/llms/test_llm_ollama.py | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)
 create mode 100644 tests/unit/llms/test_llm_ollama.py

diff --git a/tests/unit/llms/test_llm_ollama.py b/tests/unit/llms/test_llm_ollama.py
new file mode 100644
index 00000000..cf4c328e
--- /dev/null
+++ b/tests/unit/llms/test_llm_ollama.py
@@ -0,0 +1,29 @@
+import pytest
+from semantic_router.llms.ollama import OllamaLLM
+from semantic_router.schema import Message
+
+@pytest.fixture
+def ollama_llm():
+    return OllamaLLM()
+
+class TestOllamaLLM:
+    def test_ollama_llm_init_success(self, ollama_llm):
+        assert ollama_llm.name == "ollama"
+        assert ollama_llm.temperature == 0.2
+        assert ollama_llm.llm_name == "openhermes"
+        assert ollama_llm.max_tokens == 200
+        assert ollama_llm.stream is False
+
+    def test_ollama_llm_call_success(self, ollama_llm, mocker):
+        mock_response = mocker.MagicMock()
+        mock_response.json.return_value = {"message": {"content": "test response"}}
+        mocker.patch('requests.post', return_value=mock_response)
+
+        output = ollama_llm([Message(role="user", content="test")])
+        assert output == "test response"
+
+    def test_ollama_llm_error_handling(self, ollama_llm, mocker):
+        mocker.patch('requests.post', side_effect=Exception("LLM error"))
+        with pytest.raises(Exception) as exc_info:
+            ollama_llm([Message(role="user", content="test")])
+        assert "LLM error" in str(exc_info.value)
\ No newline at end of file
--
GitLab
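
Note: the tests above patch requests.post rather than calling a live Ollama server, so they only depend on a small surface of OllamaLLM: the default attributes asserted in test_ollama_llm_init_success and a __call__ that posts a request and unpacks response.json()["message"]["content"]. A minimal sketch of that assumed surface follows; the class name OllamaLLMSketch, the Message stand-in, the endpoint URL, and the request payload are inferred from the tests and are assumptions, not the semantic-router source.

import requests


class Message:
    # Hypothetical stand-in for semantic_router.schema.Message.
    def __init__(self, role: str, content: str):
        self.role = role
        self.content = content


class OllamaLLMSketch:
    # Defaults mirror the values asserted in test_ollama_llm_init_success.
    def __init__(
        self,
        name: str = "ollama",
        temperature: float = 0.2,
        llm_name: str = "openhermes",
        max_tokens: int = 200,
        stream: bool = False,
    ):
        self.name = name
        self.temperature = temperature
        self.llm_name = llm_name
        self.max_tokens = max_tokens
        self.stream = stream

    def __call__(self, messages):
        # The tests patch requests.post, so the URL is never hit under test;
        # this endpoint and payload shape are assumptions about a local Ollama
        # chat API, chosen only to make the sketch self-contained.
        response = requests.post(
            "http://localhost:11434/api/chat",
            json={
                "model": self.llm_name,
                "messages": [
                    {"role": m.role, "content": m.content} for m in messages
                ],
                "options": {
                    "temperature": self.temperature,
                    "num_predict": self.max_tokens,
                },
                "stream": self.stream,
            },
        )
        return response.json()["message"]["content"]

Any implementation with this shape passes the mocked call test: with requests.post patched to return {"message": {"content": "test response"}}, the __call__ above returns "test response", and an exception raised by the patched requests.post propagates out as the error-handling test expects.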