diff --git a/llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py b/llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py
index 73b44fe765082222bfd622bac95ec33bacb69d84..296401a86bb934f36903d0a80edde85221640bcf 100644
--- a/llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py
+++ b/llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py
@@ -1,46 +1,47 @@
-from ollama import Client, AsyncClient
 from typing import (
     TYPE_CHECKING,
     Any,
+    AsyncGenerator,
     Dict,
+    Generator,
     List,
     Optional,
     Sequence,
     Tuple,
     Type,
     Union,
-    Generator,
-    AsyncGenerator,
 )
 
+from ollama import AsyncClient, Client
+
 from llama_index.core.base.llms.generic_utils import (
-    chat_to_completion_decorator,
     achat_to_completion_decorator,
-    stream_chat_to_completion_decorator,
     astream_chat_to_completion_decorator,
+    chat_to_completion_decorator,
+    stream_chat_to_completion_decorator,
 )
 from llama_index.core.base.llms.types import (
     ChatMessage,
     ChatResponse,
-    ChatResponseGen,
     ChatResponseAsyncGen,
+    ChatResponseGen,
     CompletionResponse,
     CompletionResponseAsyncGen,
     CompletionResponseGen,
+    ImageBlock,
     LLMMetadata,
     MessageRole,
     TextBlock,
-    ImageBlock,
 )
 from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr
 from llama_index.core.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS
 from llama_index.core.instrumentation import get_dispatcher
 from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback
 from llama_index.core.llms.function_calling import FunctionCallingLLM
+from llama_index.core.program.utils import process_streaming_objects
 from llama_index.core.prompts import PromptTemplate
 from llama_index.core.tools import ToolSelection
 from llama_index.core.types import PydanticProgramMode
-from llama_index.core.program.utils import process_streaming_objects
 
 if TYPE_CHECKING:
     from llama_index.core.tools.types import BaseTool
@@ -132,7 +133,7 @@ class Ollama(FunctionCallingLLM):
         base_url: str = "http://localhost:11434",
         temperature: float = 0.75,
         context_window: int = DEFAULT_CONTEXT_WINDOW,
-        request_timeout: float = DEFAULT_REQUEST_TIMEOUT,
+        request_timeout: Optional[float] = DEFAULT_REQUEST_TIMEOUT,
         prompt_key: str = "prompt",
         json_mode: bool = False,
         additional_kwargs: Dict[str, Any] = {},
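
Note on the `request_timeout` change above: widening the annotation to `Optional[float]` makes it type-correct to pass `None` explicitly. A minimal usage sketch, assuming the value is forwarded unchanged to the underlying ollama `Client` (and from there to httpx, where a `None` timeout means "no timeout"); the model tag is illustrative, not part of this patch:

    from llama_index.llms.ollama import Ollama

    llm = Ollama(
        model="llama3.1:latest",   # example tag, not part of this patch
        request_timeout=None,      # None disables the client-side timeout
    )
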
diff --git a/llama-index-integrations/llms/llama-index-llms-ollama/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-ollama/pyproject.toml
index 9e15aacedd3a36d1a15d9b113dd14b82e3722502..ff3a9dd91eaec46affbf6eea81e2cb54f5e10474 100644
--- a/llama-index-integrations/llms/llama-index-llms-ollama/pyproject.toml
+++ b/llama-index-integrations/llms/llama-index-llms-ollama/pyproject.toml
@@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
 license = "MIT"
 name = "llama-index-llms-ollama"
 readme = "README.md"
-version = "0.5.2"
+version = "0.5.3"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
diff --git a/llama-index-integrations/llms/llama-index-llms-ollama/tests/test_llms_ollama.py b/llama-index-integrations/llms/llama-index-llms-ollama/tests/test_llms_ollama.py
index d8c245880095f0061d7ccd6ef0ac9d7b77b247a1..5803fad6d7a75811125e0db2b95365ce2a986081 100644
--- a/llama-index-integrations/llms/llama-index-llms-ollama/tests/test_llms_ollama.py
+++ b/llama-index-integrations/llms/llama-index-llms-ollama/tests/test_llms_ollama.py
@@ -1,13 +1,13 @@
-import pytest
 import os
 
-from llama_index.core.bridge.pydantic import BaseModel
+import pytest
+from ollama import Client
+
 from llama_index.core.base.llms.base import BaseLLM
+from llama_index.core.bridge.pydantic import BaseModel
 from llama_index.core.llms import ChatMessage
 from llama_index.core.tools import FunctionTool
 from llama_index.llms.ollama import Ollama
-from ollama import Client
-
 
 test_model = os.environ.get("OLLAMA_TEST_MODEL", "llama3.1:latest")
 try:
@@ -16,14 +16,14 @@ try:
 
     model_found = False
     for model in models["models"]:
-        if model["name"] == test_model:
+        if model.model == test_model:
             model_found = True
             break
 
     if not model_found:
-        client = None
+        client = None  # type: ignore
 except Exception:
-    client = None
+    client = None  # type: ignore
 
 
 class Song(BaseModel):
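
The `model["name"]` to `model.model` change above tracks the typed responses in ollama-python 0.4+, where `Client.list()` returns a `ListResponse` whose entries expose the model tag as a `.model` attribute rather than under a `"name"` key; the `# type: ignore` comments cover the deliberate reassignment of `client` (annotated as `Client`) to `None` when no test model is available. A hedged sketch of the same availability probe, with the model tag as an example:

    from ollama import Client

    client = Client()
    # ListResponse also supports subscript access, so the test's
    # models["models"] is equivalent to client.list().models here.
    available = [m.model for m in client.list().models]
    model_found = "llama3.1:latest" in available
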