From 7873bfb03032b1de166306b74d706d470b88df13 Mon Sep 17 00:00:00 2001
From: "Huu Le (Lee)" <39040748+leehuwuj@users.noreply.github.com>
Date: Fri, 17 May 2024 17:01:06 +0700
Subject: [PATCH] chore: Add Ollama API base URL environment variable (#91)

---
 .changeset/tough-boats-shake.md                          | 5 +++++
 helpers/env-variables.ts                                 | 9 +++++++++
 .../streaming/express/src/controllers/engine/settings.ts | 6 ++++++
 templates/types/streaming/fastapi/app/settings.py        | 8 ++++++--
 .../streaming/nextjs/app/api/chat/engine/settings.ts     | 5 +++++
 5 files changed, 31 insertions(+), 2 deletions(-)
 create mode 100644 .changeset/tough-boats-shake.md

diff --git a/.changeset/tough-boats-shake.md b/.changeset/tough-boats-shake.md
new file mode 100644
index 00000000..d604316d
--- /dev/null
+++ b/.changeset/tough-boats-shake.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Update the Ollama provider to read its base URL from the OLLAMA_BASE_URL environment variable
diff --git a/helpers/env-variables.ts b/helpers/env-variables.ts
index 46d1f94e..3ee3c55c 100644
--- a/helpers/env-variables.ts
+++ b/helpers/env-variables.ts
@@ -219,6 +219,15 @@ const getModelEnvs = (modelConfig: ModelConfig): EnvVar[] => {
           },
         ]
       : []),
+    ...(modelConfig.provider === "ollama"
+      ? [
+          {
+            name: "OLLAMA_BASE_URL",
+            description:
+              "The base URL for the Ollama API, e.g. http://localhost:11434",
+          },
+        ]
+      : []),
   ];
 };
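
With this entry registered, projects scaffolded with the Ollama provider get the
variable documented in their generated .env file. A sketch of the expected entry,
assuming each EnvVar renders as a commented description followed by the variable
name with no default value (the exact rendering depends on the scaffolder's .env
writer):

    # The base URL for the Ollama API, e.g. http://localhost:11434
    OLLAMA_BASE_URL=

When the variable is left unset, the template changes below fall back to
http://localhost:11434, Ollama's default local address.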
 
diff --git a/templates/types/streaming/express/src/controllers/engine/settings.ts b/templates/types/streaming/express/src/controllers/engine/settings.ts
index 6e980afb..27e946cf 100644
--- a/templates/types/streaming/express/src/controllers/engine/settings.ts
+++ b/templates/types/streaming/express/src/controllers/engine/settings.ts
@@ -56,11 +56,17 @@ function initOpenAI() {
 }
 
 function initOllama() {
+  const config = {
+    host: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434",
+  };
+
   Settings.llm = new Ollama({
     model: process.env.MODEL ?? "",
+    config,
   });
   Settings.embedModel = new OllamaEmbedding({
     model: process.env.EMBEDDING_MODEL ?? "",
+    config,
   });
 }
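
The config object is forwarded to the underlying ollama client, so the LLM and the
embedding model both resolve the same host. A minimal standalone sketch of the same
wiring, assuming the Ollama, OllamaEmbedding, and Settings exports from llamaindex
that this template already imports (the model names are placeholders, not part of
this patch):

    import { Ollama, OllamaEmbedding, Settings } from "llamaindex";

    // Same fallback the template applies when OLLAMA_BASE_URL is unset.
    const config = {
      host: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434",
    };

    Settings.llm = new Ollama({ model: "llama3", config }); // placeholder model
    Settings.embedModel = new OllamaEmbedding({
      model: "nomic-embed-text", // placeholder embedding model
      config,
    });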
 
diff --git a/templates/types/streaming/fastapi/app/settings.py b/templates/types/streaming/fastapi/app/settings.py
index e4225a40..57eab4a0 100644
--- a/templates/types/streaming/fastapi/app/settings.py
+++ b/templates/types/streaming/fastapi/app/settings.py
@@ -23,8 +23,12 @@ def init_ollama():
     from llama_index.llms.ollama import Ollama
     from llama_index.embeddings.ollama import OllamaEmbedding
 
-    Settings.embed_model = OllamaEmbedding(model_name=os.getenv("EMBEDDING_MODEL"))
-    Settings.llm = Ollama(model=os.getenv("MODEL"))
+    base_url = os.getenv("OLLAMA_BASE_URL") or "http://localhost:11434"
+    Settings.embed_model = OllamaEmbedding(
+        base_url=base_url,
+        model_name=os.getenv("EMBEDDING_MODEL"),
+    )
+    Settings.llm = Ollama(base_url=base_url, model=os.getenv("MODEL"))
 
 
 def init_openai():
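
One subtlety: the Python template uses "or", which also falls back when
OLLAMA_BASE_URL is set but empty, whereas the TypeScript templates use "??", which
falls back only when the variable is missing entirely. The difference, expressed in
TypeScript for comparison (the empty value is purely illustrative):

    // An empty OLLAMA_BASE_URL separates the two fallback behaviors:
    const empty = "";
    const viaOr = empty || "http://localhost:11434";      // falls back (matches Python's "or")
    const viaNullish = empty ?? "http://localhost:11434"; // stays "" (the "??" used in the TS templates)
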
diff --git a/templates/types/streaming/nextjs/app/api/chat/engine/settings.ts b/templates/types/streaming/nextjs/app/api/chat/engine/settings.ts
index 6e980afb..a6fdf3ef 100644
--- a/templates/types/streaming/nextjs/app/api/chat/engine/settings.ts
+++ b/templates/types/streaming/nextjs/app/api/chat/engine/settings.ts
@@ -56,11 +56,16 @@ function initOpenAI() {
 }
 
 function initOllama() {
+  const config = {
+    host: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434",
+  };
   Settings.llm = new Ollama({
     model: process.env.MODEL ?? "",
+    config,
   });
   Settings.embedModel = new OllamaEmbedding({
     model: process.env.EMBEDDING_MODEL ?? "",
+    config,
   });
 }
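
The Next.js change mirrors the Express one above. A quick end-to-end check is a
one-off completion against the configured host; a hedged sketch, assuming the same
llamaindex Ollama export and its standard complete() call (model name and prompt
are placeholders):

    import { Ollama } from "llamaindex";

    const llm = new Ollama({
      model: process.env.MODEL ?? "llama3", // placeholder model name
      config: { host: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434" },
    });

    // Fails fast if the configured host is unreachable.
    const { text } = await llm.complete({ prompt: "ping" });
    console.log(text);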
 
-- 
GitLab