diff --git a/server/utils/AiProviders/anthropic/index.js b/server/utils/AiProviders/anthropic/index.js
index d5ee1f9d3d30965aad1a96af8903ab302ffe6f92..215fa5fbb862c9b01599492b6a209fe02c174d84 100644
--- a/server/utils/AiProviders/anthropic/index.js
+++ b/server/utils/AiProviders/anthropic/index.js
@@ -3,6 +3,7 @@ const {
   writeResponseChunk,
   clientAbortedHandler,
 } = require("../../helpers/chat/responses");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 
 class AnthropicLLM {
   constructor(embedder = null, modelPreference = null) {
@@ -23,11 +24,7 @@ class AnthropicLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    if (!embedder)
-      throw new Error(
-        "INVALID ANTHROPIC SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Anthropic as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }
 
diff --git a/server/utils/AiProviders/azureOpenAi/index.js b/server/utils/AiProviders/azureOpenAi/index.js
index a2ab556dbdb2b831fd0541ffc0d8f0847f23a3cd..3678567e3f3dfb71b3d87847df9535452ad2697a 100644
--- a/server/utils/AiProviders/azureOpenAi/index.js
+++ b/server/utils/AiProviders/azureOpenAi/index.js
@@ -1,4 +1,4 @@
-const { AzureOpenAiEmbedder } = require("../../EmbeddingEngines/azureOpenAi");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   writeResponseChunk,
   clientAbortedHandler,
@@ -23,11 +23,7 @@ class AzureOpenAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for AzureOpenAiLLM - falling back to AzureOpenAiEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new AzureOpenAiEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }
 
diff --git a/server/utils/AiProviders/cohere/index.js b/server/utils/AiProviders/cohere/index.js
index a97a15fcadff020261408945cd614af641addb19..6357d503e5ae8875e5643dc041102eaf9e3119d4 100644
--- a/server/utils/AiProviders/cohere/index.js
+++ b/server/utils/AiProviders/cohere/index.js
@@ -19,7 +19,8 @@ class CohereLLM {
       system: this.promptWindowLimit() * 0.15,
       user: this.promptWindowLimit() * 0.7,
     };
-    this.embedder = !!embedder ? embedder : new NativeEmbedder();
+
+    this.embedder = embedder ?? new NativeEmbedder();
   }
 
   #appendContext(contextTexts = []) {
diff --git a/server/utils/AiProviders/gemini/index.js b/server/utils/AiProviders/gemini/index.js
index 962b120136e7ebf7ac37670b62c21a3616bd6444..3dd307aff20f6b78b4bc6bf89f02bdd1b08a375b 100644
--- a/server/utils/AiProviders/gemini/index.js
+++ b/server/utils/AiProviders/gemini/index.js
@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   writeResponseChunk,
   clientAbortedHandler,
@@ -26,11 +27,7 @@ class GeminiLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    if (!embedder)
-      throw new Error(
-        "INVALID GEMINI LLM SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Gemini as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7; // not used for Gemini
   }
 
diff --git a/server/utils/AiProviders/genericOpenAi/index.js b/server/utils/AiProviders/genericOpenAi/index.js
index 686d4c677759d58af263cb103a40234cf96f6279..dc0264e487a4aee74bdae2d8e4aa4e31e6397a8c 100644
--- a/server/utils/AiProviders/genericOpenAi/index.js
+++ b/server/utils/AiProviders/genericOpenAi/index.js
@@ -27,11 +27,7 @@ class GenericOpenAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for GenericOpenAiLLM - falling back to NativeEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
     this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
   }
diff --git a/server/utils/AiProviders/groq/index.js b/server/utils/AiProviders/groq/index.js
index 01d92f006930f3cc959d925588823861f97db77d..24a430e62ecef9b26102f76afd20fc967bacc137 100644
--- a/server/utils/AiProviders/groq/index.js
+++ b/server/utils/AiProviders/groq/index.js
@@ -20,7 +20,7 @@ class GroqLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }
 
diff --git a/server/utils/AiProviders/huggingface/index.js b/server/utils/AiProviders/huggingface/index.js
index 6a79880c86f9b811a91cdefc424918cfdf68582c..d25725c2a0c226aec7d0eac82b4d309107f58202 100644
--- a/server/utils/AiProviders/huggingface/index.js
+++ b/server/utils/AiProviders/huggingface/index.js
@@ -1,5 +1,4 @@
 const { NativeEmbedder } = require("../../EmbeddingEngines/native");
-const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -26,11 +25,7 @@ class HuggingFaceLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for HuggingFaceLLM - falling back to Native for embedding!"
-      );
-    this.embedder = !embedder ? new OpenAiEmbedder() : new NativeEmbedder();
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.2;
   }
 
diff --git a/server/utils/AiProviders/koboldCPP/index.js b/server/utils/AiProviders/koboldCPP/index.js
index 4b1ff3f61b027a506b7072b111b91c9430cce0fd..90186aabf46bbf48b40e06d7b4cc0440b3a29b68 100644
--- a/server/utils/AiProviders/koboldCPP/index.js
+++ b/server/utils/AiProviders/koboldCPP/index.js
@@ -26,11 +26,7 @@ class KoboldCPPLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for KoboldCPPLLM - falling back to NativeEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
     this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
   }
diff --git a/server/utils/AiProviders/liteLLM/index.js b/server/utils/AiProviders/liteLLM/index.js
index 5973826cc8e1f2e0ed008f60250a24da8e9826c8..6152ea29613b6b402d4a69beefbc337c16933fa5 100644
--- a/server/utils/AiProviders/liteLLM/index.js
+++ b/server/utils/AiProviders/liteLLM/index.js
@@ -26,11 +26,7 @@ class LiteLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for LiteLLM - falling back to NativeEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
     this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
   }
diff --git a/server/utils/AiProviders/lmStudio/index.js b/server/utils/AiProviders/lmStudio/index.js
index 48f689fbce25042855448ddb2de1397f7854bad4..6345fb04e09396495ad9567caa2c18984aadd706 100644
--- a/server/utils/AiProviders/lmStudio/index.js
+++ b/server/utils/AiProviders/lmStudio/index.js
@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -27,11 +28,7 @@ class LMStudioLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    if (!embedder)
-      throw new Error(
-        "INVALID LM STUDIO SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LMStudio as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }
 
diff --git a/server/utils/AiProviders/localAi/index.js b/server/utils/AiProviders/localAi/index.js
index 5047752853f8c457e55726d94badfd30717a3ac0..8bc4d047d520f1a66ed896a13a9291f6698db30f 100644
--- a/server/utils/AiProviders/localAi/index.js
+++ b/server/utils/AiProviders/localAi/index.js
@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -19,11 +20,7 @@ class LocalAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    if (!embedder)
-      throw new Error(
-        "INVALID LOCAL AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LocalAI as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }
 
diff --git a/server/utils/AiProviders/mistral/index.js b/server/utils/AiProviders/mistral/index.js
index 8410d4cb642bfe7bd2043a5c75ce953384327e2b..c2546299f0ee751df4362b2e1861a7fa6d40425d 100644
--- a/server/utils/AiProviders/mistral/index.js
+++ b/server/utils/AiProviders/mistral/index.js
@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -20,11 +21,7 @@ class MistralLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for MistralLLM - falling back to OpenAiEmbedder for embedding!"
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.0;
   }
 
diff --git a/server/utils/AiProviders/native/index.js b/server/utils/AiProviders/native/index.js
index e13b68a2f834b4fc1a530c710496f6e411477b22..0cbd87476a56419c8ec37878ba506ca611810437 100644
--- a/server/utils/AiProviders/native/index.js
+++ b/server/utils/AiProviders/native/index.js
@@ -23,7 +23,7 @@ class NativeLLM {
       system: this.promptWindowLimit() * 0.15,
       user: this.promptWindowLimit() * 0.7,
     };
-    this.embedder = embedder || new NativeEmbedder();
+    this.embedder = embedder ?? new NativeEmbedder();
     this.cacheDir = path.resolve(
       process.env.STORAGE_DIR
         ? path.resolve(process.env.STORAGE_DIR, "models", "downloaded")
diff --git a/server/utils/AiProviders/ollama/index.js b/server/utils/AiProviders/ollama/index.js
index 73269d6d2bac2974b9effc469a86e9a7baf7062b..609a7601b718534cc498eebd44b91d7cda92d320 100644
--- a/server/utils/AiProviders/ollama/index.js
+++ b/server/utils/AiProviders/ollama/index.js
@@ -3,6 +3,7 @@ const {
   writeResponseChunk,
   clientAbortedHandler,
 } = require("../../helpers/chat/responses");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 
 // Docs: https://github.com/jmorganca/ollama/blob/main/docs/api.md
 class OllamaAILLM {
@@ -18,11 +19,7 @@ class OllamaAILLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    if (!embedder)
-      throw new Error(
-        "INVALID OLLAMA SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Ollama as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }
 
diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js
index 43d7c287406149221e7f7a3dfdbd4554a7449aa0..8037d23dc398dca758013cf19751130c1f58ee7b 100644
--- a/server/utils/AiProviders/openAi/index.js
+++ b/server/utils/AiProviders/openAi/index.js
@@ -1,4 +1,4 @@
-const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -18,11 +18,7 @@ class OpenAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    if (!embedder)
-      console.warn(
-        "No embedding provider defined for OpenAiLLM - falling back to OpenAiEmbedder for embedding!"
-      );
-    this.embedder = !embedder ? new OpenAiEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }
 
diff --git a/server/utils/AiProviders/openRouter/index.js b/server/utils/AiProviders/openRouter/index.js
index a8301083565107f26cdb13aa26e3ad4d6a552e57..bb8cf447192cd408f39c7e3457aa25e93d0343a2 100644
--- a/server/utils/AiProviders/openRouter/index.js
+++ b/server/utils/AiProviders/openRouter/index.js
@@ -36,7 +36,7 @@ class OpenRouterLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
 
     if (!fs.existsSync(cacheFolder))
diff --git a/server/utils/AiProviders/perplexity/index.js b/server/utils/AiProviders/perplexity/index.js
index a17ec43f54cd85075e5492ea2d184bf39f221a5c..d3f50de75dfad0ef940f55706f1f98d9a25d8f2d 100644
--- a/server/utils/AiProviders/perplexity/index.js
+++ b/server/utils/AiProviders/perplexity/index.js
@@ -28,7 +28,7 @@ class PerplexityLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }
 
diff --git a/server/utils/AiProviders/textGenWebUI/index.js b/server/utils/AiProviders/textGenWebUI/index.js
index dfce76a5a5ec50dd40ae202981c6f493ef91f455..72f116f8564a3f4accb1950168a4c412e63d3a6d 100644
--- a/server/utils/AiProviders/textGenWebUI/index.js
+++ b/server/utils/AiProviders/textGenWebUI/index.js
@@ -23,7 +23,7 @@ class TextGenWebUILLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
     this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
   }
diff --git a/server/utils/AiProviders/togetherAi/index.js b/server/utils/AiProviders/togetherAi/index.js
index 577a4b7427c3720ee9dad1efe641e023f2c33ba0..cdfef339714238557537ecd051be1ebe9ea53160 100644
--- a/server/utils/AiProviders/togetherAi/index.js
+++ b/server/utils/AiProviders/togetherAi/index.js
@@ -1,3 +1,4 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -23,11 +24,7 @@ class TogetherAiLLM {
       user: this.promptWindowLimit() * 0.7,
     };
 
-    if (!embedder)
-      throw new Error(
-        "INVALID TOGETHER AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Together AI as your LLM."
-      );
-    this.embedder = embedder;
+    this.embedder = embedder ?? new NativeEmbedder();
     this.defaultTemp = 0.7;
   }
 
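
Note on the fallback semantics (illustration only, not part of the patch): every constructor above now uses nullish coalescing instead of a truthiness check. `embedder ?? new NativeEmbedder()` falls back only when `embedder` is `null` or `undefined`, while the older `!embedder ? fallback : embedder` and `embedder || fallback` forms would also replace any falsy value. A minimal standalone sketch of the difference, using a hypothetical plain object in place of a real NativeEmbedder instance:

    // Hypothetical values for illustration; the real constructors receive an
    // embedder instance or nothing (the parameter defaults to null).
    const fallback = { provider: "native" };

    let embedder = null;                           // nothing was passed
    console.log(embedder ?? fallback);             // falls back
    console.log(!embedder ? fallback : embedder);  // falls back

    embedder = "";                                 // falsy but not nullish
    console.log(embedder ?? fallback);             // keeps ""
    console.log(!embedder ? fallback : embedder);  // discards "", falls back

Assuming callers only ever pass a real embedder instance or nothing, both forms behave the same here; `??` simply states the intent, "default only when no embedder was provided", more directly.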