diff --git a/docker/.env.example b/docker/.env.example
index aabc139f8cde49d4016edaf225fbea15559cd366..5130ddb746db498ec1ac3af2f47de7483b870f0d 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -66,6 +66,12 @@ GID='1000'
 # GROQ_API_KEY=gsk_abcxyz
 # GROQ_MODEL_PREF=llama2-70b-4096
 
+# LLM_PROVIDER='generic-openai'
+# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
+# GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
+# GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
+# GENERIC_OPEN_AI_API_KEY=sk-123abc
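+# NOTE: the base path is used as-is, so it should typically include the /v1
+# suffix of your OpenAI-compatible service. The API key is optional for
+# unauthenticated endpoints.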
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
diff --git a/frontend/src/components/LLMSelection/GenericOpenAiOptions/index.jsx b/frontend/src/components/LLMSelection/GenericOpenAiOptions/index.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..456b504273e5c11998103613720064ef51c710b4
--- /dev/null
+++ b/frontend/src/components/LLMSelection/GenericOpenAiOptions/index.jsx
@@ -0,0 +1,70 @@
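+// Settings form for any OpenAI-compatible service. The base URL is always
+// required, the API key is optional, and the model/token-limit fields are
+// hidden when only credentials are being edited (settings?.credentialsOnly).
+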
+export default function GenericOpenAiOptions({ settings }) {
+  return (
+    <div className="flex gap-4 flex-wrap">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Base URL
+        </label>
+        <input
+          type="url"
+          name="GenericOpenAiBasePath"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="eg: https://proxy.openai.com/v1"
+          defaultValue={settings?.GenericOpenAiBasePath}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+        />
+      </div>
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          API Key
+        </label>
+        <input
+          type="password"
+          name="GenericOpenAiKey"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="Generic service API Key"
+          defaultValue={settings?.GenericOpenAiKey ? "*".repeat(20) : ""}
+          required={false}
+          autoComplete="off"
+          spellCheck={false}
+        />
+      </div>
+      {!settings?.credentialsOnly && (
+        <>
+          <div className="flex flex-col w-60">
+            <label className="text-white text-sm font-semibold block mb-4">
+              Chat Model Name
+            </label>
+            <input
+              type="text"
+              name="GenericOpenAiModelPref"
+              className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+              placeholder="Model id used for chat requests"
+              defaultValue={settings?.GenericOpenAiModelPref}
+              required={true}
+              autoComplete="off"
+            />
+          </div>
+          <div className="flex flex-col w-60">
+            <label className="text-white text-sm font-semibold block mb-4">
+              Token context window
+            </label>
+            <input
+              type="number"
+              name="GenericOpenAiTokenLimit"
+              className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+              placeholder="Context window limit (eg: 4096)"
+              min={1}
+              onWheel={(e) => e.target.blur()}
+              defaultValue={settings?.GenericOpenAiTokenLimit}
+              required={true}
+              autoComplete="off"
+            />
+          </div>
+        </>
+      )}
+    </div>
+  );
+}
diff --git a/frontend/src/media/llmprovider/generic-openai.png b/frontend/src/media/llmprovider/generic-openai.png
new file mode 100644
index 0000000000000000000000000000000000000000..302f5dbee0aebe15a3fc4cd6073d58314c418487
Binary files /dev/null and b/frontend/src/media/llmprovider/generic-openai.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index 5fbc826ca4b07c037cd7c8728da4bd044f0a4ac9..0575f34c01b51e336ffb1f52b6e9ba7ecdba8e0c 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -5,6 +5,7 @@ import System from "@/models/system";
 import showToast from "@/utils/toast";
 import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
 import OpenAiLogo from "@/media/llmprovider/openai.png";
+import GenericOpenAiLogo from "@/media/llmprovider/generic-openai.png";
 import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
 import AnthropicLogo from "@/media/llmprovider/anthropic.png";
 import GeminiLogo from "@/media/llmprovider/gemini.png";
@@ -19,6 +20,7 @@ import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
 import GroqLogo from "@/media/llmprovider/groq.png";
 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
+import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
 import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
 import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
 import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
@@ -150,6 +152,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
       "The fastest LLM inferencing available for real-time AI applications.",
     requiredConfig: ["GroqApiKey"],
   },
+  {
+    name: "Generic OpenAI",
+    value: "generic-openai",
+    logo: GenericOpenAiLogo,
+    options: (settings) => <GenericOpenAiOptions settings={settings} />,
+    description:
+      "Connect to any OpenAI-compatible service via a custom configuration.",
+  },
   {
     name: "Native",
     value: "native",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index b30dd45a9c4b773ae94d652a45d7f5c0242aca9d..548272fe0c9e0dfa2b0bdeaf9954f2f6ccb63412 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -2,6 +2,7 @@ import PreLoader from "@/components/Preloader";
 import System from "@/models/system";
 import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
 import OpenAiLogo from "@/media/llmprovider/openai.png";
+import GenericOpenAiLogo from "@/media/llmprovider/generic-openai.png";
 import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
 import AnthropicLogo from "@/media/llmprovider/anthropic.png";
 import GeminiLogo from "@/media/llmprovider/gemini.png";
@@ -136,6 +137,13 @@ export const LLM_SELECTION_PRIVACY = {
     ],
     logo: GroqLogo,
   },
+  "generic-openai": {
+    name: "Generic OpenAI-compatible service",
+    description: [
+      "Data is shared according to the terms of service of your generic endpoint provider.",
+    ],
+    logo: GenericOpenAiLogo,
+  },
 };
 
 export const VECTOR_DB_PRIVACY = {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 29b4e8456f938df7415f7824d121d6c77e0e044f..b9e0f5bb1d70111d5f2590630cebb443d0812713 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -1,6 +1,7 @@
 import { MagnifyingGlass } from "@phosphor-icons/react";
 import { useEffect, useState, useRef } from "react";
 import OpenAiLogo from "@/media/llmprovider/openai.png";
+import GenericOpenAiLogo from "@/media/llmprovider/generic-openai.png";
 import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
 import AnthropicLogo from "@/media/llmprovider/anthropic.png";
 import GeminiLogo from "@/media/llmprovider/gemini.png";
@@ -15,6 +16,7 @@ import PerplexityLogo from "@/media/llmprovider/perplexity.png";
 import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
 import GroqLogo from "@/media/llmprovider/groq.png";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
+import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
 import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
 import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
 import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
@@ -38,6 +40,120 @@ const TITLE = "LLM Preference";
 const DESCRIPTION =
   "AnythingLLM can work with many LLM providers. This will be the service which handles chatting.";
 
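+// Defined once at module scope; `options` is a render function so each
+// provider's settings form receives the latest fetched settings at render time.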
+const LLMS = [
+  {
+    name: "OpenAI",
+    value: "openai",
+    logo: OpenAiLogo,
+    options: (settings) => <OpenAiOptions settings={settings} />,
+    description: "The standard option for most non-commercial use.",
+  },
+  {
+    name: "Azure OpenAI",
+    value: "azure",
+    logo: AzureOpenAiLogo,
+    options: (settings) => <AzureAiOptions settings={settings} />,
+    description: "The enterprise option of OpenAI hosted on Azure services.",
+  },
+  {
+    name: "Anthropic",
+    value: "anthropic",
+    logo: AnthropicLogo,
+    options: (settings) => <AnthropicAiOptions settings={settings} />,
+    description: "A friendly AI Assistant hosted by Anthropic.",
+  },
+  {
+    name: "Gemini",
+    value: "gemini",
+    logo: GeminiLogo,
+    options: (settings) => <GeminiLLMOptions settings={settings} />,
+    description: "Google's largest and most capable AI model",
+  },
+  {
+    name: "HuggingFace",
+    value: "huggingface",
+    logo: HuggingFaceLogo,
+    options: (settings) => <HuggingFaceOptions settings={settings} />,
+    description:
+      "Access 150,000+ open-source LLMs and the world's AI community",
+  },
+  {
+    name: "Ollama",
+    value: "ollama",
+    logo: OllamaLogo,
+    options: (settings) => <OllamaLLMOptions settings={settings} />,
+    description: "Run LLMs locally on your own machine.",
+  },
+  {
+    name: "LM Studio",
+    value: "lmstudio",
+    logo: LMStudioLogo,
+    options: (settings) => <LMStudioOptions settings={settings} />,
+    description:
+      "Discover, download, and run thousands of cutting edge LLMs in a few clicks.",
+  },
+  {
+    name: "Local AI",
+    value: "localai",
+    logo: LocalAiLogo,
+    options: (settings) => <LocalAiOptions settings={settings} />,
+    description: "Run LLMs locally on your own machine.",
+  },
+  {
+    name: "Together AI",
+    value: "togetherai",
+    logo: TogetherAILogo,
+    options: (settings) => <TogetherAiOptions settings={settings} />,
+    description: "Run open source models from Together AI.",
+  },
+  {
+    name: "Mistral",
+    value: "mistral",
+    logo: MistralLogo,
+    options: (settings) => <MistralOptions settings={settings} />,
+    description: "Run open source models from Mistral AI.",
+  },
+  {
+    name: "Perplexity AI",
+    value: "perplexity",
+    logo: PerplexityLogo,
+    options: (settings) => <PerplexityOptions settings={settings} />,
+    description:
+      "Run powerful and internet-connected models hosted by Perplexity AI.",
+  },
+  {
+    name: "OpenRouter",
+    value: "openrouter",
+    logo: OpenRouterLogo,
+    options: (settings) => <OpenRouterOptions settings={settings} />,
+    description: "A unified interface for LLMs.",
+  },
+  {
+    name: "Groq",
+    value: "groq",
+    logo: GroqLogo,
+    options: (settings) => <GroqAiOptions settings={settings} />,
+    description:
+      "The fastest LLM inferencing available for real-time AI applications.",
+  },
+  {
+    name: "Generic OpenAI",
+    value: "generic-openai",
+    logo: GenericOpenAiLogo,
+    options: (settings) => <GenericOpenAiOptions settings={settings} />,
+    description:
+      "Connect to any OpenAI-compatible service via a custom configuration.",
+  },
+  {
+    name: "Native",
+    value: "native",
+    logo: AnythingLLMIcon,
+    options: (settings) => <NativeLLMOptions settings={settings} />,
+    description:
+      "Use a downloaded custom Llama model for chatting on this AnythingLLM instance.",
+  },
+];
+
 export default function LLMPreference({
   setHeader,
   setForwardBtn,
@@ -61,112 +177,6 @@ export default function LLMPreference({
     fetchKeys();
   }, []);
 
-  const LLMS = [
-    {
-      name: "OpenAI",
-      value: "openai",
-      logo: OpenAiLogo,
-      options: <OpenAiOptions settings={settings} />,
-      description: "The standard option for most non-commercial use.",
-    },
-    {
-      name: "Azure OpenAI",
-      value: "azure",
-      logo: AzureOpenAiLogo,
-      options: <AzureAiOptions settings={settings} />,
-      description: "The enterprise option of OpenAI hosted on Azure services.",
-    },
-    {
-      name: "Anthropic",
-      value: "anthropic",
-      logo: AnthropicLogo,
-      options: <AnthropicAiOptions settings={settings} />,
-      description: "A friendly AI Assistant hosted by Anthropic.",
-    },
-    {
-      name: "Gemini",
-      value: "gemini",
-      logo: GeminiLogo,
-      options: <GeminiLLMOptions settings={settings} />,
-      description: "Google's largest and most capable AI model",
-    },
-    {
-      name: "HuggingFace",
-      value: "huggingface",
-      logo: HuggingFaceLogo,
-      options: <HuggingFaceOptions settings={settings} />,
-      description:
-        "Access 150,000+ open-source LLMs and the world's AI community",
-    },
-    {
-      name: "Ollama",
-      value: "ollama",
-      logo: OllamaLogo,
-      options: <OllamaLLMOptions settings={settings} />,
-      description: "Run LLMs locally on your own machine.",
-    },
-    {
-      name: "LM Studio",
-      value: "lmstudio",
-      logo: LMStudioLogo,
-      options: <LMStudioOptions settings={settings} />,
-      description:
-        "Discover, download, and run thousands of cutting edge LLMs in a few clicks.",
-    },
-    {
-      name: "Local AI",
-      value: "localai",
-      logo: LocalAiLogo,
-      options: <LocalAiOptions settings={settings} />,
-      description: "Run LLMs locally on your own machine.",
-    },
-    {
-      name: "Together AI",
-      value: "togetherai",
-      logo: TogetherAILogo,
-      options: <TogetherAiOptions settings={settings} />,
-      description: "Run open source models from Together AI.",
-    },
-    {
-      name: "Mistral",
-      value: "mistral",
-      logo: MistralLogo,
-      options: <MistralOptions settings={settings} />,
-      description: "Run open source models from Mistral AI.",
-    },
-    {
-      name: "Perplexity AI",
-      value: "perplexity",
-      logo: PerplexityLogo,
-      options: <PerplexityOptions settings={settings} />,
-      description:
-        "Run powerful and internet-connected models hosted by Perplexity AI.",
-    },
-    {
-      name: "OpenRouter",
-      value: "openrouter",
-      logo: OpenRouterLogo,
-      options: <OpenRouterOptions settings={settings} />,
-      description: "A unified interface for LLMs.",
-    },
-    {
-      name: "Groq",
-      value: "groq",
-      logo: GroqLogo,
-      options: <GroqAiOptions settings={settings} />,
-      description:
-        "The fastest LLM inferencing available for real-time AI applications.",
-    },
-    {
-      name: "Native",
-      value: "native",
-      logo: AnythingLLMIcon,
-      options: <NativeLLMOptions settings={settings} />,
-      description:
-        "Use a downloaded custom Llama model for chatting on this AnythingLLM instance.",
-    },
-  ];
-
   function handleForward() {
     if (hiddenSubmitButtonRef.current) {
       hiddenSubmitButtonRef.current.click();
@@ -251,7 +261,7 @@ export default function LLMPreference({
         </div>
         <div className="mt-4 flex flex-col gap-y-1">
           {selectedLLM &&
-            LLMS.find((llm) => llm.value === selectedLLM)?.options}
+            LLMS.find((llm) => llm.value === selectedLLM)?.options(settings)}
         </div>
         <button
           type="submit"
diff --git a/server/.env.example b/server/.env.example
index 131dcf895e0da1073166bf26be32954cda4f5070..c333e4ec08574c46800eb7dc4c824a579e5ee08b 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -63,6 +63,12 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
 # GROQ_API_KEY=gsk_abcxyz
 # GROQ_MODEL_PREF=llama2-70b-4096
 
+# LLM_PROVIDER='generic-openai'
+# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
+# GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
+# GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
+# GENERIC_OPEN_AI_API_KEY=sk-123abc
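+# NOTE: the base path is used as-is, so it should typically include the /v1
+# suffix of your OpenAI-compatible service. The API key is optional for
+# unauthenticated endpoints.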
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index 20c161cd52dde343ddd3992d35955911bc4e2db2..bdec2af3d4fdeff0c4f493fca41d619520ee6fe2 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -358,6 +358,12 @@ const SystemSettings = {
       HuggingFaceLLMEndpoint: process.env.HUGGING_FACE_LLM_ENDPOINT,
       HuggingFaceLLMAccessToken: !!process.env.HUGGING_FACE_LLM_API_KEY,
       HuggingFaceLLMTokenLimit: process.env.HUGGING_FACE_LLM_TOKEN_LIMIT,
+
+      // Generic OpenAI Keys
+      GenericOpenAiBasePath: process.env.GENERIC_OPEN_AI_BASE_PATH,
+      GenericOpenAiModelPref: process.env.GENERIC_OPEN_AI_MODEL_PREF,
+      GenericOpenAiTokenLimit: process.env.GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT,
+      GenericOpenAiKey: !!process.env.GENERIC_OPEN_AI_API_KEY,
     };
   },
 };
diff --git a/server/utils/AiProviders/genericOpenAi/index.js b/server/utils/AiProviders/genericOpenAi/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..61e7bccf0f059ad90fe89ae715f44bf5a47d9bb9
--- /dev/null
+++ b/server/utils/AiProviders/genericOpenAi/index.js
@@ -0,0 +1,193 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const { chatPrompt } = require("../../chats");
+const { handleDefaultStreamResponse } = require("../../helpers/chat/responses");
+
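+// Connector for any OpenAI-compatible inference service. It implements the
+// same interface as the other AiProviders but targets the endpoint set via
+// GENERIC_OPEN_AI_BASE_PATH. Minimal usage sketch (hypothetical values,
+// assuming the GENERIC_OPEN_AI_* env vars are set):
+//   const llm = new GenericOpenAiLLM(null, "gpt-3.5-turbo");
+//   const reply = await llm.getChatCompletion(
+//     [{ role: "user", content: "Hello" }],
+//     { temperature: 0.7 }
+//   );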
+class GenericOpenAiLLM {
+  constructor(embedder = null, modelPreference = null) {
+    const { Configuration, OpenAIApi } = require("openai");
+    if (!process.env.GENERIC_OPEN_AI_BASE_PATH)
+      throw new Error(
+        "GenericOpenAI must have a valid base path to use for the API."
+      );
+
+    this.basePath = process.env.GENERIC_OPEN_AI_BASE_PATH;
+    const config = new Configuration({
+      basePath: this.basePath,
+      apiKey: process.env.GENERIC_OPEN_AI_API_KEY ?? null,
+    });
+    this.openai = new OpenAIApi(config);
+    this.model =
+      modelPreference ?? process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
+    if (!this.model)
+      throw new Error("GenericOpenAI must have a valid model set.");
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    if (!embedder)
+      console.warn(
+        "No embedding provider defined for GenericOpenAiLLM - falling back to NativeEmbedder for embedding!"
+      );
+    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.defaultTemp = 0.7;
+    this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
+  }
+
+  log(text, ...args) {
+    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
+  streamingEnabled() {
+    return "streamChat" in this && "streamGetChatCompletion" in this;
+  }
+
+  // Ensure the user set a value for the token limit;
+  // if undefined, assume a 4096-token window.
+  promptWindowLimit() {
+    const limit = process.env.GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT || 4096;
+    if (!limit || isNaN(Number(limit)))
+      throw new Error("No token context limit was set.");
+    return Number(limit);
+  }
+
+  // Short-circuit: for generic endpoints we cannot verify in pre-flight
+  // whether a given model name is valid.
+  isValidChatCompletionModel(_modelName = "") {
+    return true;
+  }
+
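+  // Builds the OpenAI-style message array: a system message (with any RAG
+  // context appended), then prior chat history, then the new user prompt, eg:
+  //   [{ role: "system", content: "...\nContext:\n[CONTEXT 0]: ..." },
+  //    ...chatHistory,
+  //    { role: "user", content: "..." }]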
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+  }
+
+  async isSafe(_input = "") {
+    // Not implemented so must be stubbed
+    return { safe: true, reasons: [] };
+  }
+
+  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+    const textResponse = await this.openai
+      .createChatCompletion({
+        model: this.model,
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
+        n: 1,
+        messages: await this.compressMessages(
+          {
+            systemPrompt: chatPrompt(workspace),
+            userPrompt: prompt,
+            chatHistory,
+          },
+          rawHistory
+        ),
+      })
+      .then((json) => {
+        const res = json.data;
+        if (!res.hasOwnProperty("choices"))
+          throw new Error("GenericOpenAI chat: No results!");
+        if (res.choices.length === 0)
+          throw new Error("GenericOpenAI chat: No results length!");
+        return res.choices[0].message.content;
+      })
+      .catch((error) => {
+        throw new Error(
+          `GenericOpenAI::createChatCompletion failed with: ${error.message}`
+        );
+      });
+
+    return textResponse;
+  }
+
+  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+    const streamRequest = await this.openai.createChatCompletion(
+      {
+        model: this.model,
+        stream: true,
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
+        n: 1,
+        messages: await this.compressMessages(
+          {
+            systemPrompt: chatPrompt(workspace),
+            userPrompt: prompt,
+            chatHistory,
+          },
+          rawHistory
+        ),
+      },
+      { responseType: "stream" }
+    );
+    return streamRequest;
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
+    const { data } = await this.openai
+      .createChatCompletion({
+        model: this.model,
+        messages,
+        temperature,
+      })
+      .catch((e) => {
+        throw new Error(e.response?.data?.error?.message ?? e.message);
+      });
+
+    if (!data.hasOwnProperty("choices")) return null;
+    return data.choices[0].message.content;
+  }
+
+  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+    const streamRequest = await this.openai.createChatCompletion(
+      {
+        model: this.model,
+        stream: true,
+        messages,
+        temperature,
+      },
+      { responseType: "stream" }
+    );
+    return streamRequest;
+  }
+
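+  // Streaming is delegated to the shared default handler, which parses the
+  // SSE chunks that OpenAI-compatible endpoints emit when stream: true is set.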
+  handleStream(response, stream, responseProps) {
+    return handleDefaultStreamResponse(response, stream, responseProps);
+  }
+
+  // Simple wrapper around the dynamic embedder to normalize the embedding
+  // interface across all LLM implementations.
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+module.exports = {
+  GenericOpenAiLLM,
+};
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 837a28f60a7ee1551ccc5c42a395c7ced60df865..c8cdd870f5e298df44fa5f6a8138a3ab2e847f6a 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -77,8 +77,13 @@ function getLLMProvider({ provider = null, model = null } = {}) {
     case "groq":
       const { GroqLLM } = require("../AiProviders/groq");
       return new GroqLLM(embedder, model);
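+    // Any OpenAI-compatible endpoint configured via the GENERIC_OPEN_AI_* env vars.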
+    case "generic-openai":
+      const { GenericOpenAiLLM } = require("../AiProviders/genericOpenAi");
+      return new GenericOpenAiLLM(embedder, model);
     default:
-      throw new Error("ENV: No LLM_PROVIDER value found in environment!");
+      throw new Error(
+        `ENV: No valid LLM_PROVIDER value found in environment! Got: ${process.env.LLM_PROVIDER}`
+      );
   }
 }
 
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index ee9d4effabe4a5bdb1d1f4c2ede2d4c55195b80d..5e629baf856cc3b68355e1538f192317ec9ed457 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -132,6 +132,24 @@ const KEY_MAPPING = {
     checks: [nonZero],
   },
 
+  // Generic OpenAI Inference Settings
+  GenericOpenAiBasePath: {
+    envKey: "GENERIC_OPEN_AI_BASE_PATH",
+    checks: [isValidURL],
+  },
+  GenericOpenAiModelPref: {
+    envKey: "GENERIC_OPEN_AI_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
+  GenericOpenAiTokenLimit: {
+    envKey: "GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT",
+    checks: [nonZero],
+  },
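+  // No checks: the key is optional because some self-hosted
+  // OpenAI-compatible services do not require authentication.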
+  GenericOpenAiKey: {
+    envKey: "GENERIC_OPEN_AI_API_KEY",
+    checks: [],
+  },
+
   EmbeddingEngine: {
     envKey: "EMBEDDING_ENGINE",
     checks: [supportedEmbeddingModel],
@@ -375,6 +393,7 @@ function supportedLLM(input = "") {
     "perplexity",
     "openrouter",
     "groq",
+    "generic-openai",
   ].includes(input);
   return validSelection ? null : `${input} is not a valid LLM provider.`;
 }