diff --git a/.changeset/tough-boats-shake.md b/.changeset/tough-boats-shake.md
new file mode 100644
index 0000000000000000000000000000000000000000..d604316d253fe4e78a247125fd1441b21ce83fbe
--- /dev/null
+++ b/.changeset/tough-boats-shake.md
@@ -0,0 +1,7 @@
+---
+"create-llama": patch
+---
+
+Update the Ollama provider to read its base URL from the OLLAMA_BASE_URL environment variable
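+
+Set `OLLAMA_BASE_URL` (e.g. `http://localhost:11434`) to point a generated project at a custom Ollama endpoint; when unset, the local default is used.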
diff --git a/helpers/env-variables.ts b/helpers/env-variables.ts
index 46d1f94eef95087407d5eb44d67cbda49e411cfe..3ee3c55cb6413e02d25f0e00d24476f1833a963c 100644
--- a/helpers/env-variables.ts
+++ b/helpers/env-variables.ts
@@ -219,6 +219,16 @@ const getModelEnvs = (modelConfig: ModelConfig): EnvVar[] => {
           },
         ]
       : []),
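+    // Surface OLLAMA_BASE_URL so generated projects can target a non-default Ollama endpoint.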
+    ...(modelConfig.provider === "ollama"
+      ? [
+          {
+            name: "OLLAMA_BASE_URL",
+            description:
+              "The base URL for the Ollama API, e.g. http://localhost:11434",
+          },
+        ]
+      : []),
   ];
 };
 
diff --git a/templates/types/streaming/express/src/controllers/engine/settings.ts b/templates/types/streaming/express/src/controllers/engine/settings.ts
index 6e980afbb6137f65d7e3c6cf6b2720a11897024c..27e946cfff60a312f65e1e3c5c46b51070a1a15a 100644
--- a/templates/types/streaming/express/src/controllers/engine/settings.ts
+++ b/templates/types/streaming/express/src/controllers/engine/settings.ts
@@ -56,11 +56,18 @@ function initOpenAI() {
 }
 
 function initOllama() {
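+  // Shared client config: use OLLAMA_BASE_URL when set, otherwise the default local endpoint.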
+  const config = {
+    host: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434",
+  };
+
   Settings.llm = new Ollama({
     model: process.env.MODEL ?? "",
+    config,
   });
   Settings.embedModel = new OllamaEmbedding({
     model: process.env.EMBEDDING_MODEL ?? "",
+    config,
   });
 }
 
diff --git a/templates/types/streaming/fastapi/app/settings.py b/templates/types/streaming/fastapi/app/settings.py
index e4225a40141126d935738b8d9fb305865545170a..57eab4a0bcca21cecf230fecffd9ae8edf8a035b 100644
--- a/templates/types/streaming/fastapi/app/settings.py
+++ b/templates/types/streaming/fastapi/app/settings.py
@@ -23,8 +23,13 @@ def init_ollama():
     from llama_index.llms.ollama import Ollama
     from llama_index.embeddings.ollama import OllamaEmbedding
 
-    Settings.embed_model = OllamaEmbedding(model_name=os.getenv("EMBEDDING_MODEL"))
-    Settings.llm = Ollama(model=os.getenv("MODEL"))
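+    # Prefer OLLAMA_BASE_URL; "or" also falls back to the default when the variable is set but empty.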
+    base_url = os.getenv("OLLAMA_BASE_URL") or "http://localhost:11434"
+    Settings.embed_model = OllamaEmbedding(
+        base_url=base_url,
+        model_name=os.getenv("EMBEDDING_MODEL"),
+    )
+    Settings.llm = Ollama(base_url=base_url, model=os.getenv("MODEL"))
 
 
 def init_openai():
diff --git a/templates/types/streaming/nextjs/app/api/chat/engine/settings.ts b/templates/types/streaming/nextjs/app/api/chat/engine/settings.ts
index 6e980afbb6137f65d7e3c6cf6b2720a11897024c..a6fdf3ef754094a75f3a31cf2b7bf324da9eef01 100644
--- a/templates/types/streaming/nextjs/app/api/chat/engine/settings.ts
+++ b/templates/types/streaming/nextjs/app/api/chat/engine/settings.ts
@@ -56,11 +56,17 @@ function initOpenAI() {
 }
 
 function initOllama() {
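+  // Shared client config: use OLLAMA_BASE_URL when set, otherwise the default local endpoint.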
+  const config = {
+    host: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434",
+  };
   Settings.llm = new Ollama({
     model: process.env.MODEL ?? "",
+    config,
   });
   Settings.embedModel = new OllamaEmbedding({
     model: process.env.EMBEDDING_MODEL ?? "",
+    config,
   });
 }