Unverified commit 7873bfb0, authored by Huu Le (Lee), committed by GitHub

chore: Add Ollama API base URL environment variable (#91)

parent 0c7c41ee
---
"create-llama": patch
---
Update the Ollama provider to use the base URL from the OLLAMA_BASE_URL environment variable.
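
The intended resolution order, sketched below as a minimal TypeScript snippet (illustrative only, not part of the diff): when OLLAMA_BASE_URL is set, the generated backend uses it; otherwise it falls back to Ollama's default local endpoint.

// Minimal sketch of the resolution order this change introduces
// (illustrative, not part of the diff).
const ollamaBaseUrl: string =
  process.env.OLLAMA_BASE_URL ?? "http://localhost:11434";
console.log(`Ollama requests will be sent to ${ollamaBaseUrl}`);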
@@ -219,6 +219,15 @@ const getModelEnvs = (modelConfig: ModelConfig): EnvVar[] => {
           },
         ]
       : []),
+    ...(modelConfig.provider === "ollama"
+      ? [
+          {
+            name: "OLLAMA_BASE_URL",
+            description:
+              "The base URL for the Ollama API. Eg: http://localhost:11434",
+          },
+        ]
+      : []),
   ];
 };
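For illustration, the entry that the new conditional spread contributes for the "ollama" provider looks roughly like the snippet below. The EnvVar and ModelConfig shapes are simplified assumptions, not the real create-llama types, and the entry is presumably what ends up documented in the generated project's environment configuration.

// Simplified, assumed shapes -- the real types live in create-llama.
type EnvVar = { name: string; description: string };
type ModelConfig = { provider: string };

// Mirrors the conditional spread above: only the "ollama" provider gets the entry.
const modelConfig: ModelConfig = { provider: "ollama" };
const ollamaEnvVar: EnvVar | undefined =
  modelConfig.provider === "ollama"
    ? {
        name: "OLLAMA_BASE_URL",
        description:
          "The base URL for the Ollama API. Eg: http://localhost:11434",
      }
    : undefined;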
@@ -56,11 +56,17 @@ function initOpenAI() {
 }
 
 function initOllama() {
+  const config = {
+    host: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434",
+  };
   Settings.llm = new Ollama({
     model: process.env.MODEL ?? "",
+    config,
   });
   Settings.embedModel = new OllamaEmbedding({
     model: process.env.EMBEDDING_MODEL ?? "",
+    config,
   });
 }
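If stricter handling were wanted, a hypothetical helper like the one below (not part of this commit) could apply the same fallback while rejecting malformed values early; it relies only on the standard URL constructor.

// Hypothetical helper, not in the codebase: resolve the base URL with the same
// fallback as above and fail fast if the value is not a parseable URL.
function resolveOllamaBaseUrl(): string {
  const raw = process.env.OLLAMA_BASE_URL ?? "http://localhost:11434";
  try {
    new URL(raw); // throws on malformed input
    return raw;
  } catch {
    throw new Error(`Invalid OLLAMA_BASE_URL: ${raw}`);
  }
}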
@@ -23,8 +23,12 @@ def init_ollama():
     from llama_index.llms.ollama import Ollama
     from llama_index.embeddings.ollama import OllamaEmbedding
 
-    Settings.embed_model = OllamaEmbedding(model_name=os.getenv("EMBEDDING_MODEL"))
-    Settings.llm = Ollama(model=os.getenv("MODEL"))
+    base_url = os.getenv("OLLAMA_BASE_URL") or "http://localhost:11434"
+    Settings.embed_model = OllamaEmbedding(
+        base_url=base_url,
+        model_name=os.getenv("EMBEDDING_MODEL"),
+    )
+    Settings.llm = Ollama(base_url=base_url, model=os.getenv("MODEL"))
 
 
 def init_openai():
@@ -56,11 +56,16 @@ function initOpenAI() {
 }
 
 function initOllama() {
+  const config = {
+    host: process.env.OLLAMA_BASE_URL ?? "http://localhost:11434",
+  };
   Settings.llm = new Ollama({
     model: process.env.MODEL ?? "",
+    config,
   });
   Settings.embedModel = new OllamaEmbedding({
     model: process.env.EMBEDDING_MODEL ?? "",
+    config,
   });
 }