Unverified commit 38f29a3e authored by Matt Speck, committed by GitHub

Make `request_timeout` in Ollama LLM optional (#18007)

* Made request_timeout in Ollama LLM optional

* Fixed model check in test_llms_ollama.py

* Fixed client type for test_llms_ollama.py
parent c963a9c7
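For context, a minimal usage sketch of what this change enables (the model name and prompt are illustrative, and it assumes an Ollama server is reachable at the default base_url):

from llama_index.llms.ollama import Ollama

# request_timeout now accepts either a float (seconds) or None; None is
# forwarded to the underlying HTTP client, which then waits indefinitely.
llm_no_timeout = Ollama(model="llama3.1:latest", request_timeout=None)
llm_bounded = Ollama(model="llama3.1:latest", request_timeout=120.0)

# Illustrative call; requires a running Ollama server with the model pulled.
print(llm_bounded.complete("Reply with a single word."))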
llama_index/llms/ollama/base.py
-from ollama import Client, AsyncClient
 from typing import (
     TYPE_CHECKING,
     Any,
+    AsyncGenerator,
     Dict,
+    Generator,
     List,
     Optional,
     Sequence,
     Tuple,
     Type,
     Union,
-    Generator,
-    AsyncGenerator,
 )
+from ollama import AsyncClient, Client
 from llama_index.core.base.llms.generic_utils import (
-    chat_to_completion_decorator,
     achat_to_completion_decorator,
-    stream_chat_to_completion_decorator,
     astream_chat_to_completion_decorator,
+    chat_to_completion_decorator,
+    stream_chat_to_completion_decorator,
 )
 from llama_index.core.base.llms.types import (
     ChatMessage,
     ChatResponse,
-    ChatResponseGen,
     ChatResponseAsyncGen,
+    ChatResponseGen,
     CompletionResponse,
     CompletionResponseAsyncGen,
     CompletionResponseGen,
+    ImageBlock,
     LLMMetadata,
     MessageRole,
     TextBlock,
-    ImageBlock,
 )
 from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr
 from llama_index.core.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS
 from llama_index.core.instrumentation import get_dispatcher
 from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback
 from llama_index.core.llms.function_calling import FunctionCallingLLM
+from llama_index.core.program.utils import process_streaming_objects
 from llama_index.core.prompts import PromptTemplate
 from llama_index.core.tools import ToolSelection
 from llama_index.core.types import PydanticProgramMode
-from llama_index.core.program.utils import process_streaming_objects

 if TYPE_CHECKING:
     from llama_index.core.tools.types import BaseTool
@@ -132,7 +133,7 @@ class Ollama(FunctionCallingLLM):
         base_url: str = "http://localhost:11434",
         temperature: float = 0.75,
         context_window: int = DEFAULT_CONTEXT_WINDOW,
-        request_timeout: float = DEFAULT_REQUEST_TIMEOUT,
+        request_timeout: Optional[float] = DEFAULT_REQUEST_TIMEOUT,
         prompt_key: str = "prompt",
         json_mode: bool = False,
         additional_kwargs: Dict[str, Any] = {},
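A sketch (not the class's actual internals) of how an Optional[float] timeout can be forwarded to the ollama clients: ollama.Client and ollama.AsyncClient pass extra keyword arguments through to httpx, where timeout=None means no client-side deadline rather than a type error.

from typing import Optional

from ollama import AsyncClient, Client

DEFAULT_REQUEST_TIMEOUT = 30.0


def make_clients(
    base_url: str = "http://localhost:11434",
    request_timeout: Optional[float] = DEFAULT_REQUEST_TIMEOUT,
) -> tuple[Client, AsyncClient]:
    # Extra kwargs are forwarded to httpx.Client / httpx.AsyncClient,
    # so timeout=None disables the client-side timeout entirely.
    return (
        Client(host=base_url, timeout=request_timeout),
        AsyncClient(host=base_url, timeout=request_timeout),
    )


sync_client, async_client = make_clients(request_timeout=None)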
pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-ollama"
 readme = "README.md"
-version = "0.5.2"
+version = "0.5.3"

 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
tests/test_llms_ollama.py
-import pytest
 import os
-from llama_index.core.bridge.pydantic import BaseModel
+import pytest
-from ollama import Client
 from llama_index.core.base.llms.base import BaseLLM
+from llama_index.core.bridge.pydantic import BaseModel
 from llama_index.core.llms import ChatMessage
 from llama_index.core.tools import FunctionTool
 from llama_index.llms.ollama import Ollama
+from ollama import Client

 test_model = os.environ.get("OLLAMA_TEST_MODEL", "llama3.1:latest")

 try:
@@ -16,14 +16,14 @@ try:
     model_found = False
     for model in models["models"]:
-        if model["name"] == test_model:
+        if model.model == test_model:
             model_found = True
             break
     if not model_found:
-        client = None
+        client = None  # type: ignore
 except Exception:
-    client = None
+    client = None  # type: ignore


 class Song(BaseModel):
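For reference, a condensed sketch of the availability check the updated test performs: recent releases of the ollama Python client return typed model entries from list(), so the comparison uses the .model attribute rather than dict-style access, and client falls back to None so the suite can skip when no server or model is available.

import os

from ollama import Client

test_model = os.environ.get("OLLAMA_TEST_MODEL", "llama3.1:latest")

try:
    client = Client()
    # list() returns typed entries in recent ollama client versions.
    model_found = any(m.model == test_model for m in client.list()["models"])
    if not model_found:
        client = None  # type: ignore
except Exception:
    client = None  # type: ignore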