Skip to content
Snippets Groups Projects
Unverified Commit b5ac62ff authored by Sean Hatfield's avatar Sean Hatfield Committed by GitHub
Browse files

[FEAT] Auto detection for Ollama and LMStudio (#1756)


* add ollama automatic url detection in llm and embedder preferences

* implement auto detection for lmstudio llm and embedder/improve performance of checking common urls

* fix modal not clearing

* fix lmstudio url check

* improve ux for ollama llm provider option

* improve ux for lm studio llm provider option

* improve ux for ollama embedder option

* improve ux for lmstudio embedder option

* ux improvement lmstudio embedder options

* refactor implementation to hook and use native timeout
Swap to Promise.any for resolving of available endpoints

* implement useProviderEndpointAutoDiscovery hook for lmstudio and ollama provider options

---------

Co-authored-by: default avatartimothycarambat <rambat1010@gmail.com>
parent fa4ab0f6
No related branches found
Tags v0.1.0.dev1
No related merge requests found
import React, { useEffect, useState } from "react"; import React, { useEffect, useState } from "react";
import System from "@/models/system"; import System from "@/models/system";
import PreLoader from "@/components/Preloader";
import { LMSTUDIO_COMMON_URLS } from "@/utils/constants";
import { CaretDown, CaretUp } from "@phosphor-icons/react";
import useProviderEndpointAutoDiscovery from "@/hooks/useProviderEndpointAutoDiscovery";
export default function LMStudioEmbeddingOptions({ settings }) { export default function LMStudioEmbeddingOptions({ settings }) {
const [basePathValue, setBasePathValue] = useState( const {
settings?.EmbeddingBasePath autoDetecting: loading,
basePath,
basePathValue,
showAdvancedControls,
setShowAdvancedControls,
handleAutoDetectClick,
} = useProviderEndpointAutoDiscovery({
provider: "lmstudio",
initialBasePath: settings?.EmbeddingBasePath,
ENDPOINTS: LMSTUDIO_COMMON_URLS,
});
const [maxChunkLength, setMaxChunkLength] = useState(
settings?.EmbeddingModelMaxChunkLength || 8192
); );
const [basePath, setBasePath] = useState(settings?.EmbeddingBasePath);
const handleMaxChunkLengthChange = (e) => {
setMaxChunkLength(Number(e.target.value));
};
return ( return (
<div className="w-full flex flex-col gap-y-4"> <div className="w-full flex flex-col gap-y-4">
<div className="w-full flex items-center gap-4"> <div className="w-full flex items-start gap-4">
<div className="flex flex-col w-60"> <LMStudioModelSelection settings={settings} basePath={basePath.value} />
<label className="text-white text-sm font-semibold block mb-4">
LMStudio Base URL
</label>
<input
type="url"
name="EmbeddingBasePath"
className="bg-zinc-900 text-white placeholder-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="http://localhost:1234/v1"
defaultValue={settings?.EmbeddingBasePath}
onChange={(e) => setBasePathValue(e.target.value)}
onBlur={() => setBasePath(basePathValue)}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<LMStudioModelSelection settings={settings} basePath={basePath} />
<div className="flex flex-col w-60"> <div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4"> <label className="text-white text-sm font-semibold block mb-2">
Max embedding chunk length Max Embedding Chunk Length
</label> </label>
<input <input
type="number" type="number"
name="EmbeddingModelMaxChunkLength" name="EmbeddingModelMaxChunkLength"
className="bg-zinc-900 text-white placeholder-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5" className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="8192" placeholder="8192"
min={1} min={1}
value={maxChunkLength}
onChange={handleMaxChunkLengthChange}
onScroll={(e) => e.target.blur()} onScroll={(e) => e.target.blur()}
defaultValue={settings?.EmbeddingModelMaxChunkLength} required={true}
required={false}
autoComplete="off" autoComplete="off"
/> />
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Maximum length of text chunks for embedding.
</p>
</div>
</div>
<div className="flex justify-start mt-4">
<button
onClick={(e) => {
e.preventDefault();
setShowAdvancedControls(!showAdvancedControls);
}}
className="text-white hover:text-white/70 flex items-center text-sm"
>
{showAdvancedControls ? "Hide" : "Show"} Manual Endpoint Input
{showAdvancedControls ? (
<CaretUp size={14} className="ml-1" />
) : (
<CaretDown size={14} className="ml-1" />
)}
</button>
</div>
<div hidden={!showAdvancedControls}>
<div className="w-full flex items-start gap-4">
<div className="flex flex-col w-60">
<div className="flex justify-between items-center mb-2">
<label className="text-white text-sm font-semibold">
LM Studio Base URL
</label>
{loading ? (
<PreLoader size="6" />
) : (
<>
{!basePathValue.value && (
<button
onClick={handleAutoDetectClick}
className="bg-primary-button text-xs font-medium px-2 py-1 rounded-lg hover:bg-secondary hover:text-white shadow-[0_4px_14px_rgba(0,0,0,0.25)]"
>
Auto-Detect
</button>
)}
</>
)}
</div>
<input
type="url"
name="EmbeddingBasePath"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="http://localhost:1234/v1"
value={basePathValue.value}
required={true}
autoComplete="off"
spellCheck={false}
onChange={basePath.onChange}
onBlur={basePath.onBlur}
/>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Enter the URL where LM Studio is running.
</p>
</div>
</div> </div>
</div> </div>
</div> </div>
...@@ -55,14 +119,23 @@ function LMStudioModelSelection({ settings, basePath = null }) { ...@@ -55,14 +119,23 @@ function LMStudioModelSelection({ settings, basePath = null }) {
useEffect(() => { useEffect(() => {
async function findCustomModels() { async function findCustomModels() {
if (!basePath || !basePath.includes("/v1")) { if (!basePath) {
setCustomModels([]); setCustomModels([]);
setLoading(false); setLoading(false);
return; return;
} }
setLoading(true); setLoading(true);
const { models } = await System.customModels("lmstudio", null, basePath); try {
setCustomModels(models || []); const { models } = await System.customModels(
"lmstudio",
null,
basePath
);
setCustomModels(models || []);
} catch (error) {
console.error("Failed to fetch custom models:", error);
setCustomModels([]);
}
setLoading(false); setLoading(false);
} }
findCustomModels(); findCustomModels();
...@@ -71,8 +144,8 @@ function LMStudioModelSelection({ settings, basePath = null }) { ...@@ -71,8 +144,8 @@ function LMStudioModelSelection({ settings, basePath = null }) {
if (loading || customModels.length == 0) { if (loading || customModels.length == 0) {
return ( return (
<div className="flex flex-col w-60"> <div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4"> <label className="text-white text-sm font-semibold block mb-2">
Chat Model Selection LM Studio Embedding Model
</label> </label>
<select <select
name="EmbeddingModelPref" name="EmbeddingModelPref"
...@@ -80,19 +153,23 @@ function LMStudioModelSelection({ settings, basePath = null }) { ...@@ -80,19 +153,23 @@ function LMStudioModelSelection({ settings, basePath = null }) {
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5" className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
> >
<option disabled={true} selected={true}> <option disabled={true} selected={true}>
{basePath?.includes("/v1") {!!basePath
? "-- loading available models --" ? "--loading available models--"
: "-- waiting for URL --"} : "Enter LM Studio URL first"}
</option> </option>
</select> </select>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Select the LM Studio model for embeddings. Models will load after
entering a valid LM Studio URL.
</p>
</div> </div>
); );
} }
return ( return (
<div className="flex flex-col w-60"> <div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4"> <label className="text-white text-sm font-semibold block mb-2">
Chat Model Selection LM Studio Embedding Model
</label> </label>
<select <select
name="EmbeddingModelPref" name="EmbeddingModelPref"
...@@ -115,6 +192,9 @@ function LMStudioModelSelection({ settings, basePath = null }) { ...@@ -115,6 +192,9 @@ function LMStudioModelSelection({ settings, basePath = null }) {
</optgroup> </optgroup>
)} )}
</select> </select>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Choose the LM Studio model you want to use for generating embeddings.
</p>
</div> </div>
); );
} }
import React, { useEffect, useState } from "react"; import React, { useEffect, useState } from "react";
import System from "@/models/system"; import System from "@/models/system";
import PreLoader from "@/components/Preloader";
import { OLLAMA_COMMON_URLS } from "@/utils/constants";
import { CaretDown, CaretUp } from "@phosphor-icons/react";
import useProviderEndpointAutoDiscovery from "@/hooks/useProviderEndpointAutoDiscovery";
export default function OllamaEmbeddingOptions({ settings }) { export default function OllamaEmbeddingOptions({ settings }) {
const [basePathValue, setBasePathValue] = useState( const {
settings?.EmbeddingBasePath autoDetecting: loading,
basePath,
basePathValue,
showAdvancedControls,
setShowAdvancedControls,
handleAutoDetectClick,
} = useProviderEndpointAutoDiscovery({
provider: "ollama",
initialBasePath: settings?.EmbeddingBasePath,
ENDPOINTS: OLLAMA_COMMON_URLS,
});
const [maxChunkLength, setMaxChunkLength] = useState(
settings?.EmbeddingModelMaxChunkLength || 8192
); );
const [basePath, setBasePath] = useState(settings?.EmbeddingBasePath);
const handleMaxChunkLengthChange = (e) => {
setMaxChunkLength(Number(e.target.value));
};
return ( return (
<div className="w-full flex flex-col gap-y-4"> <div className="w-full flex flex-col gap-y-4">
<div className="w-full flex items-center gap-4"> <div className="w-full flex items-start gap-4">
<div className="flex flex-col w-60"> <OllamaEmbeddingModelSelection
<label className="text-white text-sm font-semibold block mb-4"> settings={settings}
Ollama Base URL basePath={basePath.value}
</label> />
<input
type="url"
name="EmbeddingBasePath"
className="bg-zinc-900 text-white placeholder-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="http://127.0.0.1:11434"
defaultValue={settings?.EmbeddingBasePath}
onChange={(e) => setBasePathValue(e.target.value)}
onBlur={() => setBasePath(basePathValue)}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
<OllamaLLMModelSelection settings={settings} basePath={basePath} />
<div className="flex flex-col w-60"> <div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4"> <label className="text-white text-sm font-semibold block mb-2">
Max embedding chunk length Max Embedding Chunk Length
</label> </label>
<input <input
type="number" type="number"
name="EmbeddingModelMaxChunkLength" name="EmbeddingModelMaxChunkLength"
className="bg-zinc-900 text-white placeholder-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5" className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="8192" placeholder="8192"
min={1} min={1}
value={maxChunkLength}
onChange={handleMaxChunkLengthChange}
onScroll={(e) => e.target.blur()} onScroll={(e) => e.target.blur()}
defaultValue={settings?.EmbeddingModelMaxChunkLength} required={true}
required={false}
autoComplete="off" autoComplete="off"
/> />
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Maximum length of text chunks for embedding.
</p>
</div>
</div>
<div className="flex justify-start mt-4">
<button
onClick={(e) => {
e.preventDefault();
setShowAdvancedControls(!showAdvancedControls);
}}
className="text-white hover:text-white/70 flex items-center text-sm"
>
{showAdvancedControls ? "Hide" : "Show"} Manual Endpoint Input
{showAdvancedControls ? (
<CaretUp size={14} className="ml-1" />
) : (
<CaretDown size={14} className="ml-1" />
)}
</button>
</div>
<div hidden={!showAdvancedControls}>
<div className="w-full flex items-start gap-4">
<div className="flex flex-col w-60">
<div className="flex justify-between items-center mb-2">
<label className="text-white text-sm font-semibold">
Ollama Base URL
</label>
{loading ? (
<PreLoader size="6" />
) : (
<>
{!basePathValue.value && (
<button
onClick={handleAutoDetectClick}
className="bg-primary-button text-xs font-medium px-2 py-1 rounded-lg hover:bg-secondary hover:text-white shadow-[0_4px_14px_rgba(0,0,0,0.25)]"
>
Auto-Detect
</button>
)}
</>
)}
</div>
<input
type="url"
name="EmbeddingBasePath"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="http://127.0.0.1:11434"
value={basePathValue.value}
required={true}
autoComplete="off"
spellCheck={false}
onChange={basePath.onChange}
onBlur={basePath.onBlur}
/>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Enter the URL where Ollama is running.
</p>
</div>
</div> </div>
</div> </div>
</div> </div>
); );
} }
function OllamaLLMModelSelection({ settings, basePath = null }) { function OllamaEmbeddingModelSelection({ settings, basePath = null }) {
const [customModels, setCustomModels] = useState([]); const [customModels, setCustomModels] = useState([]);
const [loading, setLoading] = useState(true); const [loading, setLoading] = useState(true);
...@@ -61,8 +128,13 @@ function OllamaLLMModelSelection({ settings, basePath = null }) { ...@@ -61,8 +128,13 @@ function OllamaLLMModelSelection({ settings, basePath = null }) {
return; return;
} }
setLoading(true); setLoading(true);
const { models } = await System.customModels("ollama", null, basePath); try {
setCustomModels(models || []); const { models } = await System.customModels("ollama", null, basePath);
setCustomModels(models || []);
} catch (error) {
console.error("Failed to fetch custom models:", error);
setCustomModels([]);
}
setLoading(false); setLoading(false);
} }
findCustomModels(); findCustomModels();
...@@ -71,33 +143,37 @@ function OllamaLLMModelSelection({ settings, basePath = null }) { ...@@ -71,33 +143,37 @@ function OllamaLLMModelSelection({ settings, basePath = null }) {
if (loading || customModels.length == 0) { if (loading || customModels.length == 0) {
return ( return (
<div className="flex flex-col w-60"> <div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4"> <label className="text-white text-sm font-semibold block mb-2">
Embedding Model Selection Ollama Embedding Model
</label> </label>
<select <select
name="EmbeddingModelPref" name="EmbeddingModelPref"
disabled={true} disabled={true}
className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5" className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
> >
<option disabled={true} selected={true}> <option disabled={true} selected={true}>
{!!basePath {!!basePath
? "-- loading available models --" ? "--loading available models--"
: "-- waiting for URL --"} : "Enter Ollama URL first"}
</option> </option>
</select> </select>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Select the Ollama model for embeddings. Models will load after
entering a valid Ollama URL.
</p>
</div> </div>
); );
} }
return ( return (
<div className="flex flex-col w-60"> <div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4"> <label className="text-white text-sm font-semibold block mb-2">
Embedding Model Selection Ollama Embedding Model
</label> </label>
<select <select
name="EmbeddingModelPref" name="EmbeddingModelPref"
required={true} required={true}
className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5" className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
> >
{customModels.length > 0 && ( {customModels.length > 0 && (
<optgroup label="Your loaded models"> <optgroup label="Your loaded models">
...@@ -115,6 +191,9 @@ function OllamaLLMModelSelection({ settings, basePath = null }) { ...@@ -115,6 +191,9 @@ function OllamaLLMModelSelection({ settings, basePath = null }) {
</optgroup> </optgroup>
)} )}
</select> </select>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Choose the Ollama model you want to use for generating embeddings.
</p>
</div> </div>
); );
} }
import { useEffect, useState } from "react"; import { useEffect, useState } from "react";
import { Info } from "@phosphor-icons/react"; import { Info, CaretDown, CaretUp } from "@phosphor-icons/react";
import paths from "@/utils/paths"; import paths from "@/utils/paths";
import System from "@/models/system"; import System from "@/models/system";
import PreLoader from "@/components/Preloader";
import { LMSTUDIO_COMMON_URLS } from "@/utils/constants";
import useProviderEndpointAutoDiscovery from "@/hooks/useProviderEndpointAutoDiscovery";
export default function LMStudioOptions({ settings, showAlert = false }) { export default function LMStudioOptions({ settings, showAlert = false }) {
const [basePathValue, setBasePathValue] = useState( const {
settings?.LMStudioBasePath autoDetecting: loading,
basePath,
basePathValue,
showAdvancedControls,
setShowAdvancedControls,
handleAutoDetectClick,
} = useProviderEndpointAutoDiscovery({
provider: "lmstudio",
initialBasePath: settings?.LMStudioBasePath,
ENDPOINTS: LMSTUDIO_COMMON_URLS,
});
const [maxTokens, setMaxTokens] = useState(
settings?.LMStudioTokenLimit || 4096
); );
const [basePath, setBasePath] = useState(settings?.LMStudioBasePath);
const handleMaxTokensChange = (e) => {
setMaxTokens(Number(e.target.value));
};
return ( return (
<div className="w-full flex flex-col"> <div className="w-full flex flex-col">
...@@ -28,45 +47,86 @@ export default function LMStudioOptions({ settings, showAlert = false }) { ...@@ -28,45 +47,86 @@ export default function LMStudioOptions({ settings, showAlert = false }) {
</a> </a>
</div> </div>
)} )}
<div className="w-full flex items-center gap-4"> <div className="w-full flex items-start gap-4">
<LMStudioModelSelection settings={settings} basePath={basePath.value} />
<div className="flex flex-col w-60"> <div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4"> <label className="text-white text-sm font-semibold block mb-2">
LMStudio Base URL Max Tokens
</label> </label>
<input <input
type="url" type="number"
name="LMStudioBasePath" name="LMStudioTokenLimit"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5" className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="http://localhost:1234/v1" placeholder="4096"
defaultValue={settings?.LMStudioBasePath} defaultChecked="4096"
min={1}
value={maxTokens}
onChange={handleMaxTokensChange}
onScroll={(e) => e.target.blur()}
required={true} required={true}
autoComplete="off" autoComplete="off"
spellCheck={false}
onChange={(e) => setBasePathValue(e.target.value)}
onBlur={() => setBasePath(basePathValue)}
/> />
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Maximum number of tokens for context and response.
</p>
</div> </div>
{!settings?.credentialsOnly && ( </div>
<> <div className="flex justify-start mt-4">
<LMStudioModelSelection settings={settings} basePath={basePath} /> <button
<div className="flex flex-col w-60"> onClick={(e) => {
<label className="text-white text-sm font-semibold block mb-4"> e.preventDefault();
Token context window setShowAdvancedControls(!showAdvancedControls);
}}
className="text-white hover:text-white/70 flex items-center text-sm"
>
{showAdvancedControls ? "Hide" : "Show"} Manual Endpoint Input
{showAdvancedControls ? (
<CaretUp size={14} className="ml-1" />
) : (
<CaretDown size={14} className="ml-1" />
)}
</button>
</div>
<div hidden={!showAdvancedControls}>
<div className="w-full flex items-start gap-4 mt-4">
<div className="flex flex-col w-60">
<div className="flex justify-between items-center mb-2">
<label className="text-white text-sm font-semibold">
LM Studio Base URL
</label> </label>
<input {loading ? (
type="number" <PreLoader size="6" />
name="LMStudioTokenLimit" ) : (
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5" <>
placeholder="4096" {!basePathValue.value && (
min={1} <button
onScroll={(e) => e.target.blur()} onClick={handleAutoDetectClick}
defaultValue={settings?.LMStudioTokenLimit} className="bg-primary-button text-xs font-medium px-2 py-1 rounded-lg hover:bg-secondary hover:text-white shadow-[0_4px_14px_rgba(0,0,0,0.25)]"
required={true} >
autoComplete="off" Auto-Detect
/> </button>
)}
</>
)}
</div> </div>
</> <input
)} type="url"
name="LMStudioBasePath"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="http://localhost:1234/v1"
value={basePathValue.value}
required={true}
autoComplete="off"
spellCheck={false}
onChange={basePath.onChange}
onBlur={basePath.onBlur}
/>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Enter the URL where LM Studio is running.
</p>
</div>
</div>
</div> </div>
</div> </div>
); );
...@@ -78,14 +138,23 @@ function LMStudioModelSelection({ settings, basePath = null }) { ...@@ -78,14 +138,23 @@ function LMStudioModelSelection({ settings, basePath = null }) {
useEffect(() => { useEffect(() => {
async function findCustomModels() { async function findCustomModels() {
if (!basePath || !basePath.includes("/v1")) { if (!basePath) {
setCustomModels([]); setCustomModels([]);
setLoading(false); setLoading(false);
return; return;
} }
setLoading(true); setLoading(true);
const { models } = await System.customModels("lmstudio", null, basePath); try {
setCustomModels(models || []); const { models } = await System.customModels(
"lmstudio",
null,
basePath
);
setCustomModels(models || []);
} catch (error) {
console.error("Failed to fetch custom models:", error);
setCustomModels([]);
}
setLoading(false); setLoading(false);
} }
findCustomModels(); findCustomModels();
...@@ -94,8 +163,8 @@ function LMStudioModelSelection({ settings, basePath = null }) { ...@@ -94,8 +163,8 @@ function LMStudioModelSelection({ settings, basePath = null }) {
if (loading || customModels.length == 0) { if (loading || customModels.length == 0) {
return ( return (
<div className="flex flex-col w-60"> <div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4"> <label className="text-white text-sm font-semibold block mb-2">
Chat Model Selection LM Studio Model
</label> </label>
<select <select
name="LMStudioModelPref" name="LMStudioModelPref"
...@@ -103,19 +172,23 @@ function LMStudioModelSelection({ settings, basePath = null }) { ...@@ -103,19 +172,23 @@ function LMStudioModelSelection({ settings, basePath = null }) {
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5" className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
> >
<option disabled={true} selected={true}> <option disabled={true} selected={true}>
{basePath?.includes("/v1") {!!basePath
? "-- loading available models --" ? "--loading available models--"
: "-- waiting for URL --"} : "Enter LM Studio URL first"}
</option> </option>
</select> </select>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Select the LM Studio model you want to use. Models will load after
entering a valid LM Studio URL.
</p>
</div> </div>
); );
} }
return ( return (
<div className="flex flex-col w-60"> <div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4"> <label className="text-white text-sm font-semibold block mb-2">
Chat Model Selection LM Studio Model
</label> </label>
<select <select
name="LMStudioModelPref" name="LMStudioModelPref"
...@@ -138,6 +211,9 @@ function LMStudioModelSelection({ settings, basePath = null }) { ...@@ -138,6 +211,9 @@ function LMStudioModelSelection({ settings, basePath = null }) {
</optgroup> </optgroup>
)} )}
</select> </select>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Choose the LM Studio model you want to use for your conversations.
</p>
</div> </div>
); );
} }
import { useEffect, useState } from "react"; import React, { useEffect, useState } from "react";
import System from "@/models/system"; import System from "@/models/system";
import PreLoader from "@/components/Preloader";
import { OLLAMA_COMMON_URLS } from "@/utils/constants";
import { CaretDown, CaretUp } from "@phosphor-icons/react";
import useProviderEndpointAutoDiscovery from "@/hooks/useProviderEndpointAutoDiscovery";
export default function OllamaLLMOptions({ settings }) { export default function OllamaLLMOptions({ settings }) {
const [basePathValue, setBasePathValue] = useState( const {
settings?.OllamaLLMBasePath autoDetecting: loading,
basePath,
basePathValue,
showAdvancedControls,
setShowAdvancedControls,
handleAutoDetectClick,
} = useProviderEndpointAutoDiscovery({
provider: "ollama",
initialBasePath: settings?.OllamaLLMBasePath,
ENDPOINTS: OLLAMA_COMMON_URLS,
});
const [maxTokens, setMaxTokens] = useState(
settings?.OllamaLLMTokenLimit || 4096
); );
const [basePath, setBasePath] = useState(settings?.OllamaLLMBasePath);
const handleMaxTokensChange = (e) => {
setMaxTokens(Number(e.target.value));
};
return ( return (
<div className="w-full flex flex-col gap-y-4"> <div className="w-full flex flex-col gap-y-4">
<div className="w-full flex items-center gap-4"> <div className="w-full flex items-start gap-4">
<OllamaLLMModelSelection
settings={settings}
basePath={basePath.value}
/>
<div className="flex flex-col w-60"> <div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4"> <label className="text-white text-sm font-semibold block mb-2">
Ollama Base URL Max Tokens
</label> </label>
<input <input
type="url" type="number"
name="OllamaLLMBasePath" name="OllamaLLMTokenLimit"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5" className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="http://127.0.0.1:11434" placeholder="4096"
defaultValue={settings?.OllamaLLMBasePath} defaultChecked="4096"
min={1}
value={maxTokens}
onChange={handleMaxTokensChange}
onScroll={(e) => e.target.blur()}
required={true} required={true}
autoComplete="off" autoComplete="off"
spellCheck={false}
onChange={(e) => setBasePathValue(e.target.value)}
onBlur={() => setBasePath(basePathValue)}
/> />
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Maximum number of tokens for context and response.
</p>
</div> </div>
{!settings?.credentialsOnly && ( </div>
<> <div className="flex justify-start mt-4">
<OllamaLLMModelSelection settings={settings} basePath={basePath} /> <button
<div className="flex flex-col w-60"> onClick={(e) => {
<label className="text-white text-sm font-semibold block mb-4"> e.preventDefault();
Token context window setShowAdvancedControls(!showAdvancedControls);
}}
className="text-white hover:text-white/70 flex items-center text-sm"
>
{showAdvancedControls ? "Hide" : "Show"} Manual Endpoint Input
{showAdvancedControls ? (
<CaretUp size={14} className="ml-1" />
) : (
<CaretDown size={14} className="ml-1" />
)}
</button>
</div>
<div hidden={!showAdvancedControls}>
<div className="w-full flex items-start gap-4">
<div className="flex flex-col w-60">
<div className="flex justify-between items-center mb-2">
<label className="text-white text-sm font-semibold">
Ollama Base URL
</label> </label>
<input {loading ? (
type="number" <PreLoader size="6" />
name="OllamaLLMTokenLimit" ) : (
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5" <>
placeholder="4096" {!basePathValue.value && (
min={1} <button
onScroll={(e) => e.target.blur()} onClick={handleAutoDetectClick}
defaultValue={settings?.OllamaLLMTokenLimit} className="bg-primary-button text-xs font-medium px-2 py-1 rounded-lg hover:bg-secondary hover:text-white shadow-[0_4px_14px_rgba(0,0,0,0.25)]"
required={true} >
autoComplete="off" Auto-Detect
/> </button>
)}
</>
)}
</div> </div>
</> <input
)} type="url"
name="OllamaLLMBasePath"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="http://127.0.0.1:11434"
value={basePathValue.value}
required={true}
autoComplete="off"
spellCheck={false}
onChange={basePath.onChange}
onBlur={basePath.onBlur}
/>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Enter the URL where Ollama is running.
</p>
</div>
</div>
</div> </div>
</div> </div>
); );
...@@ -65,8 +129,13 @@ function OllamaLLMModelSelection({ settings, basePath = null }) { ...@@ -65,8 +129,13 @@ function OllamaLLMModelSelection({ settings, basePath = null }) {
return; return;
} }
setLoading(true); setLoading(true);
const { models } = await System.customModels("ollama", null, basePath); try {
setCustomModels(models || []); const { models } = await System.customModels("ollama", null, basePath);
setCustomModels(models || []);
} catch (error) {
console.error("Failed to fetch custom models:", error);
setCustomModels([]);
}
setLoading(false); setLoading(false);
} }
findCustomModels(); findCustomModels();
...@@ -75,8 +144,8 @@ function OllamaLLMModelSelection({ settings, basePath = null }) { ...@@ -75,8 +144,8 @@ function OllamaLLMModelSelection({ settings, basePath = null }) {
if (loading || customModels.length == 0) { if (loading || customModels.length == 0) {
return ( return (
<div className="flex flex-col w-60"> <div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4"> <label className="text-white text-sm font-semibold block mb-2">
Chat Model Selection Ollama Model
</label> </label>
<select <select
name="OllamaLLMModelPref" name="OllamaLLMModelPref"
...@@ -85,18 +154,22 @@ function OllamaLLMModelSelection({ settings, basePath = null }) { ...@@ -85,18 +154,22 @@ function OllamaLLMModelSelection({ settings, basePath = null }) {
> >
<option disabled={true} selected={true}> <option disabled={true} selected={true}>
{!!basePath {!!basePath
? "-- loading available models --" ? "--loading available models--"
: "-- waiting for URL --"} : "Enter Ollama URL first"}
</option> </option>
</select> </select>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Select the Ollama model you want to use. Models will load after
entering a valid Ollama URL.
</p>
</div> </div>
); );
} }
return ( return (
<div className="flex flex-col w-60"> <div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4"> <label className="text-white text-sm font-semibold block mb-2">
Chat Model Selection Ollama Model
</label> </label>
<select <select
name="OllamaLLMModelPref" name="OllamaLLMModelPref"
...@@ -119,6 +192,9 @@ function OllamaLLMModelSelection({ settings, basePath = null }) { ...@@ -119,6 +192,9 @@ function OllamaLLMModelSelection({ settings, basePath = null }) {
</optgroup> </optgroup>
)} )}
</select> </select>
<p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
Choose the Ollama model you want to use for your conversations.
</p>
</div> </div>
); );
} }
import { useEffect, useState } from "react";
import System from "@/models/system";
import showToast from "@/utils/toast";
/**
 * React hook that auto-discovers a locally running provider endpoint
 * (e.g. Ollama or LM Studio) by probing a list of common URLs in parallel,
 * and manages the base-path form state for the provider settings UI.
 *
 * @param {object} opts
 * @param {string|null} opts.provider - provider slug passed to System.customModels.
 * @param {string} opts.initialBasePath - previously saved endpoint, if any.
 * @param {string[]} opts.ENDPOINTS - candidate URLs to probe.
 * @returns {object} state + handlers consumed by the provider options components.
 */
export default function useProviderEndpointAutoDiscovery({
  provider = null,
  initialBasePath = "",
  ENDPOINTS = [],
}) {
  const [loading, setLoading] = useState(false);
  const [basePath, setBasePath] = useState(initialBasePath);
  const [basePathValue, setBasePathValue] = useState(initialBasePath);
  const [autoDetectAttempted, setAutoDetectAttempted] = useState(false);
  const [showAdvancedControls, setShowAdvancedControls] = useState(true);

  /**
   * Probe every candidate endpoint concurrently and adopt the first one that
   * reports at least one available model.
   *
   * @param {boolean} isInitialAttempt - true when fired automatically on
   *   mount; suppresses the failure toast so users are not shown an error
   *   before they have interacted with the form.
   */
  async function autoDetect(isInitialAttempt = false) {
    setLoading(true);
    setAutoDetectAttempted(true);

    // Each probe fulfills with { endpoint, models } or rejects. The 2s
    // timeout keeps unreachable hosts from stalling discovery. No extra
    // `new Promise` wrapper is needed — customModels already returns one.
    const probes = ENDPOINTS.map((endpoint) =>
      System.customModels(provider, null, endpoint, 2_000).then((results) => {
        if (!results?.models || results.models.length === 0)
          throw new Error(`${provider} @ ${endpoint} did not resolve.`);
        return { endpoint, models: results.models };
      })
    );

    try {
      // Promise.any fulfills with the first reachable endpoint and only
      // rejects (AggregateError) once every probe has failed.
      const { endpoint } = await Promise.any(probes);
      setBasePath(endpoint);
      setBasePathValue(endpoint);
      showToast("Provider endpoint discovered automatically.", "success", {
        clear: true,
      });
      setShowAdvancedControls(false);
    } catch {
      console.error("All endpoints failed to resolve.");
      setShowAdvancedControls(true);
      if (!isInitialAttempt) {
        showToast(
          "Couldn't automatically discover the provider endpoint. Please enter it manually.",
          "info",
          { clear: true }
        );
      }
    } finally {
      setLoading(false);
    }
  }

  function handleAutoDetectClick(e) {
    e.preventDefault();
    autoDetect();
  }

  // Track keystrokes in the draft value; only commit to basePath on blur.
  function handleBasePathChange(e) {
    setBasePathValue(e.target.value);
  }

  function handleBasePathBlur() {
    setBasePath(basePathValue);
  }

  // Run discovery once on mount, but only when no endpoint was saved before.
  useEffect(() => {
    if (!initialBasePath && !autoDetectAttempted) autoDetect(true);
  }, [initialBasePath, autoDetectAttempted]);

  return {
    autoDetecting: loading,
    autoDetectAttempted,
    showAdvancedControls,
    setShowAdvancedControls,
    basePath: {
      value: basePath,
      // NOTE(review): `set` deliberately updates the draft (basePathValue),
      // mirroring the original behavior — confirm this is intended by callers.
      set: setBasePathValue,
      onChange: handleBasePathChange,
      onBlur: handleBasePathBlur,
    },
    basePathValue: {
      value: basePathValue,
      set: setBasePathValue,
    },
    handleAutoDetectClick,
    runAutoDetect: autoDetect,
  };
}
...@@ -512,10 +512,23 @@ const System = { ...@@ -512,10 +512,23 @@ const System = {
return false; return false;
}); });
}, },
customModels: async function (provider, apiKey = null, basePath = null) { customModels: async function (
provider,
apiKey = null,
basePath = null,
timeout = null
) {
const controller = new AbortController();
if (!!timeout) {
setTimeout(() => {
controller.abort("Request timed out.");
}, timeout);
}
return fetch(`${API_BASE}/system/custom-models`, { return fetch(`${API_BASE}/system/custom-models`, {
method: "POST", method: "POST",
headers: baseHeaders(), headers: baseHeaders(),
signal: controller.signal,
body: JSON.stringify({ body: JSON.stringify({
provider, provider,
apiKey, apiKey,
......
...@@ -10,6 +10,19 @@ export const SEEN_WATCH_ALERT = "anythingllm_watched_document_alert"; ...@@ -10,6 +10,19 @@ export const SEEN_WATCH_ALERT = "anythingllm_watched_document_alert";
export const USER_BACKGROUND_COLOR = "bg-historical-msg-user"; export const USER_BACKGROUND_COLOR = "bg-historical-msg-user";
export const AI_BACKGROUND_COLOR = "bg-historical-msg-system"; export const AI_BACKGROUND_COLOR = "bg-historical-msg-system";
// Common endpoints probed when auto-detecting a locally running Ollama
// server (default port 11434). `localhost` is included for consistency with
// the LM Studio list below; unreachable candidates are harmless because
// discovery resolves via Promise.any.
export const OLLAMA_COMMON_URLS = [
  "http://127.0.0.1:11434",
  "http://localhost:11434",
  "http://host.docker.internal:11434",
  "http://172.17.0.1:11434",
];

// Common endpoints probed when auto-detecting a locally running LM Studio
// server, which serves an OpenAI-compatible API under /v1 on port 1234.
export const LMSTUDIO_COMMON_URLS = [
  "http://localhost:1234/v1",
  "http://127.0.0.1:1234/v1",
  "http://host.docker.internal:1234/v1",
  "http://172.17.0.1:1234/v1",
];
export function fullApiUrl() { export function fullApiUrl() {
if (API_BASE !== "/api") return API_BASE; if (API_BASE !== "/api") return API_BASE;
return `${window.location.origin}/api`; return `${window.location.origin}/api`;
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment