diff --git a/helpers/env-variables.ts b/helpers/env-variables.ts
index b5b36dd7bdebe404d60b03d495d165d81d21614a..34fdc952aaf73132461516733859a35a3da1d14b 100644
--- a/helpers/env-variables.ts
+++ b/helpers/env-variables.ts
@@ -224,7 +224,7 @@ const getModelEnvs = (modelConfig: ModelConfig): EnvVar[] => {
         {
           name: "OLLAMA_BASE_URL",
           description:
-            "The base URL for the Ollama API. Eg: http://localhost:11434",
+            "The base URL for the Ollama API. Eg: http://127.0.0.1:11434",
         },
       ]
     : []),
diff --git a/helpers/proxy.ts b/helpers/proxy.ts
index 2973926c2e773ceafc7898b583a0e8f4c81e4e23..a65d0704729a0d4591d1495e6379e0a697dba49b 100644
--- a/helpers/proxy.ts
+++ b/helpers/proxy.ts
@@ -4,7 +4,5 @@ export async function initializeGlobalAgent() {
     /* Dynamically import global-agent/bootstrap */
     await import("global-agent/bootstrap");
     console.log("Proxy enabled via global-agent.");
-  } else {
-    console.log("No proxy configuration found. Continuing without proxy.");
   }
 }
diff --git a/templates/types/streaming/express/package.json b/templates/types/streaming/express/package.json
index d61b7fba727c3aa3818695f30259538760d28a00..b8bb9a71dc95e7215ba2f4253a39f0155b624334 100644
--- a/templates/types/streaming/express/package.json
+++ b/templates/types/streaming/express/package.json
@@ -14,7 +14,7 @@
     "cors": "^2.8.5",
     "dotenv": "^16.3.1",
     "express": "^4.18.2",
-    "llamaindex": "0.3.9",
+    "llamaindex": "0.3.13",
     "pdf2json": "3.0.5",
     "ajv": "^8.12.0"
   },
diff --git a/templates/types/streaming/express/src/controllers/engine/settings.ts b/templates/types/streaming/express/src/controllers/engine/settings.ts
index 27e946cfff60a312f65e1e3c5c46b51070a1a15a..a46feb7e493df65a448c7039a760eb8f1a5bc875 100644
--- a/templates/types/streaming/express/src/controllers/engine/settings.ts
+++ b/templates/types/streaming/express/src/controllers/engine/settings.ts
@@ -57,7 +57,7 @@ function initOpenAI() {
 
 function initOllama() {
   const config = {
-    host: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434",
+    host: process.env.OLLAMA_BASE_URL ?? "http://127.0.0.1:11434",
   };
 
   Settings.llm = new Ollama({
diff --git a/templates/types/streaming/fastapi/app/settings.py b/templates/types/streaming/fastapi/app/settings.py
index 57eab4a0bcca21cecf230fecffd9ae8edf8a035b..90cc091809e69b9b712c5d35c3049dfd54ae01e4 100644
--- a/templates/types/streaming/fastapi/app/settings.py
+++ b/templates/types/streaming/fastapi/app/settings.py
@@ -23,7 +23,7 @@ def init_ollama():
     from llama_index.llms.ollama import Ollama
     from llama_index.embeddings.ollama import OllamaEmbedding
 
-    base_url = os.getenv("OLLAMA_BASE_URL") or "http://localhost:11434"
+    base_url = os.getenv("OLLAMA_BASE_URL") or "http://127.0.0.1:11434"
     Settings.embed_model = OllamaEmbedding(
         base_url=base_url,
         model_name=os.getenv("EMBEDDING_MODEL"),
diff --git a/templates/types/streaming/nextjs/app/api/chat/engine/settings.ts b/templates/types/streaming/nextjs/app/api/chat/engine/settings.ts
index a6fdf3ef754094a75f3a31cf2b7bf324da9eef01..9b7bb00e1cb7f74d9a6f35b0dfca0b134bdbd892 100644
--- a/templates/types/streaming/nextjs/app/api/chat/engine/settings.ts
+++ b/templates/types/streaming/nextjs/app/api/chat/engine/settings.ts
@@ -57,7 +57,7 @@ function initOpenAI() {
 
 function initOllama() {
   const config = {
-    host: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434",
+    host: process.env.OLLAMA_BASE_URL ?? "http://127.0.0.1:11434",
   };
   Settings.llm = new Ollama({
     model: process.env.MODEL ?? "",
diff --git a/templates/types/streaming/nextjs/package.json b/templates/types/streaming/nextjs/package.json
index babe431916ee0db3705d25450cdabdff11654ebf..ad182f7309b24dd95ade6bb23156af32ee1946b4 100644
--- a/templates/types/streaming/nextjs/package.json
+++ b/templates/types/streaming/nextjs/package.json
@@ -18,7 +18,7 @@
     "class-variance-authority": "^0.7.0",
     "clsx": "^2.1.1",
     "dotenv": "^16.3.1",
-    "llamaindex": "0.3.8",
+    "llamaindex": "0.3.13",
     "lucide-react": "^0.294.0",
     "next": "^14.0.3",
     "pdf2json": "3.0.5",