diff --git a/docker/.env.example b/docker/.env.example
index 6368a1900b783876aeb206a1e2c55982f7e5eae4..174a9d692724b036f0d297a0aeddafcc9d6b9345 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -128,6 +128,12 @@ GID='1000'
 # VOYAGEAI_API_KEY=
 # EMBEDDING_MODEL_PREF='voyage-large-2-instruct'
 
+# EMBEDDING_ENGINE='litellm'
+# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
+# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
+# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
+# LITE_LLM_API_KEY='sk-123abc'
+
 ###########################################
 ######## Vector Database Selection ########
 ###########################################
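For reference, a minimal Node sketch to confirm these values point at a live LiteLLM proxy before enabling the embedder. It assumes the proxy exposes the OpenAI-compatible `/v1/models` route and Node 18+ for the global `fetch`:

```js
// Minimal smoke test for the LiteLLM settings above. Assumes a LiteLLM proxy
// running at LITE_LLM_BASE_PATH with the OpenAI-compatible /v1/models route,
// and Node 18+ for the global fetch.
const basePath = process.env.LITE_LLM_BASE_PATH || "http://127.0.0.1:4000";
const apiKey = process.env.LITE_LLM_API_KEY || "sk-123abc";

fetch(`${basePath}/v1/models`, {
  headers: { Authorization: `Bearer ${apiKey}` },
})
  .then((res) => res.json())
  .then(({ data }) => {
    // Each id is a candidate value for EMBEDDING_MODEL_PREF.
    console.log((data || []).map((model) => model.id));
  })
  .catch((e) => console.error("LiteLLM proxy unreachable:", e.message));
```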
diff --git a/frontend/src/components/EmbeddingSelection/LiteLLMOptions/index.jsx b/frontend/src/components/EmbeddingSelection/LiteLLMOptions/index.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..d5586c883df37ce5c87ec90e4491ce5592b109b6
--- /dev/null
+++ b/frontend/src/components/EmbeddingSelection/LiteLLMOptions/index.jsx
@@ -0,0 +1,186 @@
+import { useEffect, useState } from "react";
+import System from "@/models/system";
+import { Warning } from "@phosphor-icons/react";
+import { Tooltip } from "react-tooltip";
+
+export default function LiteLLMOptions({ settings }) {
+  const [basePathValue, setBasePathValue] = useState(settings?.LiteLLMBasePath);
+  const [basePath, setBasePath] = useState(settings?.LiteLLMBasePath);
+  const [apiKeyValue, setApiKeyValue] = useState(settings?.LiteLLMAPIKey);
+  const [apiKey, setApiKey] = useState(settings?.LiteLLMAPIKey);
+
+  return (
+    <div className="w-full flex flex-col gap-y-4">
+      <div className="w-full flex items-center gap-4">
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-4">
+            Base URL
+          </label>
+          <input
+            type="url"
+            name="LiteLLMBasePath"
+            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+            placeholder="http://127.0.0.1:4000"
+            defaultValue={settings?.LiteLLMBasePath}
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+            onChange={(e) => setBasePathValue(e.target.value)}
+            onBlur={() => setBasePath(basePathValue)}
+          />
+        </div>
+        <LiteLLMModelSelection
+          settings={settings}
+          basePath={basePath}
+          apiKey={apiKey}
+        />
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-4">
+            Max embedding chunk length
+          </label>
+          <input
+            type="number"
+            name="EmbeddingModelMaxChunkLength"
+            className="bg-zinc-900 text-white placeholder-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+            placeholder="8192"
+            min={1}
+            onScroll={(e) => e.target.blur()}
+            defaultValue={settings?.EmbeddingModelMaxChunkLength}
+            required={false}
+            autoComplete="off"
+          />
+        </div>
+      </div>
+      <div className="w-full flex items-center gap-4">
+        <div className="flex flex-col w-60">
+          <div className="flex flex-col gap-y-1 mb-4">
+            <label className="text-white text-sm font-semibold flex items-center gap-x-2">
+              API Key <span className="!text-xs !italic !font-thin">optional</span>
+            </label>
+          </div>
+          <input
+            type="password"
+            name="LiteLLMAPIKey"
+            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+            placeholder="sk-mysecretkey"
+            defaultValue={settings?.LiteLLMAPIKey ? "*".repeat(20) : ""}
+            autoComplete="off"
+            spellCheck={false}
+            onChange={(e) => setApiKeyValue(e.target.value)}
+            onBlur={() => setApiKey(apiKeyValue)}
+          />
+        </div>
+      </div>
+    </div>
+  );
+}
+
+function LiteLLMModelSelection({ settings, basePath = null, apiKey = null }) {
+  const [customModels, setCustomModels] = useState([]);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    async function findCustomModels() {
+      if (!basePath) {
+        setCustomModels([]);
+        setLoading(false);
+        return;
+      }
+      setLoading(true);
+      const { models } = await System.customModels(
+        "litellm",
+        typeof apiKey === "boolean" ? null : apiKey,
+        basePath
+      );
+      setCustomModels(models || []);
+      setLoading(false);
+    }
+    findCustomModels();
+  }, [basePath, apiKey]);
+
+  if (loading || customModels.length === 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Embedding Model Selection
+        </label>
+        <select
+          name="EmbeddingModelPref"
+          disabled={true}
+          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            {!!basePath
+              ? "-- loading available models --"
+              : "-- waiting for URL --"}
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <div className="flex items-center">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Embedding Model Selection
+        </label>
+        <EmbeddingModelTooltip />
+      </div>
+      <select
+        name="EmbeddingModelPref"
+        required={true}
+        className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {customModels.length > 0 && (
+          <optgroup label="Your loaded models">
+            {customModels.map((model) => {
+              return (
+                <option
+                  key={model.id}
+                  value={model.id}
+                  selected={settings.EmbeddingModelPref === model.id}
+                >
+                  {model.id}
+                </option>
+              );
+            })}
+          </optgroup>
+        )}
+      </select>
+    </div>
+  );
+}
+
+function EmbeddingModelTooltip() {
+  return (
+    <div className="flex items-center justify-center -mt-3 ml-1">
+      <Warning
+        size={14}
+        className="ml-1 text-orange-500 cursor-pointer"
+        data-tooltip-id="model-tooltip"
+        data-tooltip-place="right"
+      />
+      <Tooltip
+        delayHide={300}
+        id="model-tooltip"
+        className="max-w-xs"
+        clickable={true}
+      >
+        <p className="text-sm">
+          Be sure to select a valid embedding model. Chat models are not
+          embedding models. See{" "}
+          <a
+            href="https://litellm.vercel.app/docs/embedding/supported_embedding"
+            target="_blank"
+            rel="noreferrer"
+            className="underline"
+          >
+            this page
+          </a>{" "}
+          for more information.
+        </p>
+      </Tooltip>
+    </div>
+  );
+}
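The model dropdown above relies on `System.customModels` resolving to an object of the form `{ models: [{ id: string }, ...] }` for the `litellm` provider. A hedged stub of that contract, useful for exercising the component in isolation (illustrative only, not the real `@/models/system` implementation):

```js
// Illustrative stub of the System.customModels contract that
// LiteLLMModelSelection depends on; not the real @/models/system module.
const System = {
  async customModels(provider, apiKey = null, basePath = null) {
    // The real implementation proxies this lookup through the server, which
    // asks the LiteLLM instance at `basePath` for its available models.
    if (provider !== "litellm" || !basePath) return { models: [] };
    return {
      models: [
        { id: "text-embedding-ada-002" },
        { id: "text-embedding-3-small" },
      ],
    };
  },
};
```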
diff --git a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
index 5a0f51c1df55092f47385469aa35f47ecf34f2e5..4d032dc016df521d28d20dc4faf8597e6e2da949 100644
--- a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
@@ -11,6 +11,7 @@ import OllamaLogo from "@/media/llmprovider/ollama.png";
 import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";
+import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 
 import PreLoader from "@/components/Preloader";
 import ChangeWarningModal from "@/components/ChangeWarning";
@@ -22,6 +23,7 @@ import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOption
 import LMStudioEmbeddingOptions from "@/components/EmbeddingSelection/LMStudioOptions";
 import CohereEmbeddingOptions from "@/components/EmbeddingSelection/CohereOptions";
 import VoyageAiOptions from "@/components/EmbeddingSelection/VoyageAiOptions";
+import LiteLLMOptions from "@/components/EmbeddingSelection/LiteLLMOptions";
 
 import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -88,6 +90,13 @@ const EMBEDDERS = [
     options: (settings) => <VoyageAiOptions settings={settings} />,
     description: "Run powerful embedding models from Voyage AI.",
   },
+  {
+    name: "LiteLLM",
+    value: "litellm",
+    logo: LiteLLMLogo,
+    options: (settings) => <LiteLLMOptions settings={settings} />,
+    description: "Run powerful embedding models from LiteLLM.",
+  },
 ];
 
 export default function GeneralEmbeddingPreference() {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index 35358636d0955e2ff991cb543daa3e6a33e0a75e..b4fa666ff4f1bc21cf5a07a20d5ecf74a0a8080f 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -301,6 +301,13 @@ export const EMBEDDING_ENGINE_PRIVACY = {
     ],
     logo: VoyageAiLogo,
   },
+  litellm: {
+    name: "LiteLLM",
+    description: [
+      "Your document text is only accessible on the server running LiteLLM and to the providers you configured in LiteLLM.",
+    ],
+    logo: LiteLLMLogo,
+  },
 };
 
 export default function DataHandling({ setHeader, setForwardBtn, setBackBtn }) {
diff --git a/server/.env.example b/server/.env.example
index f51d61771800751b7ec72f100e7de212fd025975..6148d594f925e866bf8db40dff76d7a929e47f01 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -125,6 +125,12 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
 # VOYAGEAI_API_KEY=
 # EMBEDDING_MODEL_PREF='voyage-large-2-instruct'
 
+# EMBEDDING_ENGINE='litellm'
+# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
+# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
+# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
+# LITE_LLM_API_KEY='sk-123abc'
+
 ###########################################
 ######## Vector Database Selection ########
 ###########################################
diff --git a/server/utils/EmbeddingEngines/liteLLM/index.js b/server/utils/EmbeddingEngines/liteLLM/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..cd22480b1c6517517fbceaa9150730c0812c3ac7
--- /dev/null
+++ b/server/utils/EmbeddingEngines/liteLLM/index.js
@@ -0,0 +1,93 @@
+const { toChunks, maximumChunkLength } = require("../../helpers");
+
+class LiteLLMEmbedder {
+  constructor() {
+    const { OpenAI: OpenAIApi } = require("openai");
+    if (!process.env.LITE_LLM_BASE_PATH)
+      throw new Error(
+        "LiteLLM must have a valid base path to use for the api."
+      );
+    this.basePath = process.env.LITE_LLM_BASE_PATH;
+    this.openai = new OpenAIApi({
+      baseURL: this.basePath,
+      apiKey: process.env.LITE_LLM_API_KEY ?? null,
+    });
+    this.model = process.env.EMBEDDING_MODEL_PREF || "text-embedding-ada-002";
+
+    // Limit of how many strings we can process in a single pass to stay within resource or network limits
+    this.maxConcurrentChunks = 500;
+    this.embeddingMaxChunkLength = maximumChunkLength();
+  }
+
+  async embedTextInput(textInput) {
+    const result = await this.embedChunks(
+      Array.isArray(textInput) ? textInput : [textInput]
+    );
+    return result?.[0] || [];
+  }
+
+  async embedChunks(textChunks = []) {
+    // Because there is a hard POST limit on how much data can be sent at once to
+    // LiteLLM (~8MB), we concurrently execute batches of at most maxConcurrentChunks
+    // text chunks. Refer to the constructor's maxConcurrentChunks for more info.
+    const embeddingRequests = [];
+    for (const chunk of toChunks(textChunks, this.maxConcurrentChunks)) {
+      embeddingRequests.push(
+        new Promise((resolve) => {
+          this.openai.embeddings
+            .create({
+              model: this.model,
+              input: chunk,
+            })
+            .then((result) => {
+              resolve({ data: result?.data, error: null });
+            })
+            .catch((e) => {
+              e.type =
+                e?.response?.data?.error?.code ||
+                e?.response?.status ||
+                "failed_to_embed";
+              e.message = e?.response?.data?.error?.message || e.message;
+              resolve({ data: [], error: e });
+            });
+        })
+      );
+    }
+
+    const { data = [], error = null } = await Promise.all(
+      embeddingRequests
+    ).then((results) => {
+      // If any errors were returned from LiteLLM, abort the entire sequence because
+      // the embeddings will be incomplete.
+      const errors = results
+        .filter((res) => !!res.error)
+        .map((res) => res.error)
+        .flat();
+      if (errors.length > 0) {
+        let uniqueErrors = new Set();
+        errors.forEach((error) =>
+          uniqueErrors.add(`[${error.type}]: ${error.message}`)
+        );
+
+        return {
+          data: [],
+          error: Array.from(uniqueErrors).join(", "),
+        };
+      }
+      return {
+        data: results.map((res) => res?.data || []).flat(),
+        error: null,
+      };
+    });
+
+    if (!!error) throw new Error(`LiteLLM Failed to embed: ${error}`);
+    return data.length > 0 &&
+      data.every((embd) => embd.hasOwnProperty("embedding"))
+      ? data.map((embd) => embd.embedding)
+      : null;
+  }
+}
+
+module.exports = {
+  LiteLLMEmbedder,
+};
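For context, a minimal usage sketch of the embedder above. It assumes `LITE_LLM_BASE_PATH` is set (plus `LITE_LLM_API_KEY` and `EMBEDDING_MODEL_PREF` as needed) and that the proxy actually serves the configured embedding model:

```js
// Illustrative usage of LiteLLMEmbedder; requires LITE_LLM_BASE_PATH (and
// optionally LITE_LLM_API_KEY / EMBEDDING_MODEL_PREF) in the environment.
// Require path assumes this runs from the repo root.
const { LiteLLMEmbedder } = require("./server/utils/EmbeddingEngines/liteLLM");

async function main() {
  const embedder = new LiteLLMEmbedder();

  // Single string in, single vector out.
  const vector = await embedder.embedTextInput("Hello, world!");
  console.log(`dimensions: ${vector.length}`);

  // Many chunks in, one vector per chunk out (null if any item is malformed).
  const vectors = await embedder.embedChunks(["first chunk", "second chunk"]);
  console.log(`embedded ${vectors?.length ?? 0} chunks`);
}

// embedChunks throws if any batch returned an error from LiteLLM.
main().catch((e) => console.error(e.message));
```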
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index e60202a6025db055c251ad252d58585a0c6adcfb..8f0df126473e246cada614f67c78d2f17306c08c 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -128,6 +128,9 @@ function getEmbeddingEngineSelection() {
     case "voyageai":
       const { VoyageAiEmbedder } = require("../EmbeddingEngines/voyageAi");
       return new VoyageAiEmbedder();
+    case "litellm":
+      const { LiteLLMEmbedder } = require("../EmbeddingEngines/liteLLM");
+      return new LiteLLMEmbedder();
     default:
       return new NativeEmbedder();
   }
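Once `EMBEDDING_ENGINE='litellm'` is set, anything that resolves its embedder through `getEmbeddingEngineSelection()` picks up the new engine transparently. A sketch, assuming the helper is exported from `server/utils/helpers` as the existing cases suggest:

```js
// Sketch of how the new case is exercised; assumes the LITE_LLM_* variables
// from .env.example are set and that getEmbeddingEngineSelection is exported
// from server/utils/helpers.
process.env.EMBEDDING_ENGINE = "litellm";

const { getEmbeddingEngineSelection } = require("./server/utils/helpers");

const embedder = getEmbeddingEngineSelection(); // -> LiteLLMEmbedder instance
embedder
  .embedChunks(["some document text"])
  .then((vectors) => console.log("ok:", vectors?.length))
  .catch((e) => console.error("embed failed:", e.message));
```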
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index d5cdc68f2e221b4f61c0ed7fcd08fa86d3e0aac2..1a0e710a97561e2cc04e6952da9c9d669adaad35 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -577,6 +577,7 @@ function supportedEmbeddingModel(input = "") {
     "lmstudio",
     "cohere",
     "voyageai",
+    "litellm",
   ];
   return supported.includes(input)
     ? null