From b38a0b75e48c0a9216121d4344ded4696e0b556d Mon Sep 17 00:00:00 2001
From: Siraj R Aizlewood <siraj@aurelio.ai>
Date: Wed, 21 Feb 2024 17:08:27 +0400
Subject: [PATCH] Linting

---
 semantic_router/llms/ollama.py | 26 ++++++++++----------------
 1 file changed, 10 insertions(+), 16 deletions(-)

diff --git a/semantic_router/llms/ollama.py b/semantic_router/llms/ollama.py
index 8aa0045b..1c1210a0 100644
--- a/semantic_router/llms/ollama.py
+++ b/semantic_router/llms/ollama.py
@@ -1,7 +1,5 @@
-import os
 from typing import List, Optional
 import requests
-import json
 
 from semantic_router.llms import BaseLLM
 from semantic_router.schema import Message
@@ -29,14 +27,13 @@ class OllamaLLM(BaseLLM):
         self.stream = stream
 
     def __call__(
-        self,
-        messages: List[Message],
-        temperature: Optional[float] = None,
-        llm_name: Optional[str] = None,
-        max_tokens: Optional[int] = None,
-        stream: Optional[bool] = None
+        self,
+        messages: List[Message],
+        temperature: Optional[float] = None,
+        llm_name: Optional[str] = None,
+        max_tokens: Optional[int] = None,
+        stream: Optional[bool] = None,
     ) -> str:
-        # Use instance defaults if not overridden
         temperature = temperature if temperature is not None else self.temperature
         llm_name = llm_name if llm_name is not None else self.llm_name
@@ -47,19 +44,16 @@ class OllamaLLM(BaseLLM):
             payload = {
                 "model": llm_name,
                 "messages": [m.to_openai() for m in messages],
-                "options": {
-                    "temperature": temperature,
-                    "num_predict": max_tokens
-                },
+                "options": {"temperature": temperature, "num_predict": max_tokens},
                 "format": "json",
-                "stream": stream
+                "stream": stream,
             }
             response = requests.post("http://localhost:11434/api/chat", json=payload)
-            
+
             output = response.json()["message"]["content"]
             return output
         except Exception as e:
             logger.error(f"LLM error: {e}")
-            raise Exception(f"LLM error: {e}") from e
\ No newline at end of file
+            raise Exception(f"LLM error: {e}") from e
-- 
GitLab
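
For reference, a minimal usage sketch of the OllamaLLM class touched by this patch. It assumes an Ollama server running locally on the default port (11434) and that the constructor accepts keyword arguments matching the instance defaults visible in the diff (temperature, llm_name, max_tokens, stream); the model name "openhermes" and the message contents are placeholders, not part of the patch.

    from semantic_router.llms.ollama import OllamaLLM
    from semantic_router.schema import Message

    # Constructor arguments are assumed from the instance attributes referenced
    # in the diff (self.temperature, self.llm_name, self.stream); the model name
    # "openhermes" is an example and must already be pulled in Ollama.
    llm = OllamaLLM(llm_name="openhermes", temperature=0.2, max_tokens=200)

    messages = [
        Message(role="system", content="You are a helpful assistant."),
        Message(role="user", content="Reply with a JSON object describing semantic routing."),
    ]

    # Per-call overrides fall back to the instance defaults when left as None,
    # as implemented in the reformatted __call__ above.
    output = llm(messages, temperature=0.0)
    print(output)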