diff --git a/docker/.env.example b/docker/.env.example
index 5905c578bacf73dd182f20978f1e587886b9f844..74ab3ef62c8ca516b3ad0d6691f15b25f863c2ba 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -35,6 +35,15 @@ CACHE_VECTORS="true"
 # EMBEDDING_ENGINE='openai'
 # OPEN_AI_KEY=sk-xxxx
 
+# EMBEDDING_ENGINE='azure'
+# AZURE_OPENAI_ENDPOINT=
+# AZURE_OPENAI_KEY=
+# EMBEDDING_MODEL_PREF='my-embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
+
+# EMBEDDING_ENGINE='localai'
+# EMBEDDING_BASE_PATH='http://localhost:8080/v1'
+# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
+
 ###########################################
 ######## Vector Database Selection ########
 ###########################################
diff --git a/frontend/src/components/Modals/MangeWorkspace/Documents/index.jsx b/frontend/src/components/Modals/MangeWorkspace/Documents/index.jsx
index 7485d0a910c079c4f15c89ca10c51ca5873a3d8f..52b818ffafca176e7a0f66c50a09745babfaf06e 100644
--- a/frontend/src/components/Modals/MangeWorkspace/Documents/index.jsx
+++ b/frontend/src/components/Modals/MangeWorkspace/Documents/index.jsx
@@ -8,7 +8,11 @@ import WorkspaceDirectory from "./WorkspaceDirectory";
 
 const COST_PER_TOKEN = 0.0004;
 
-export default function DocumentSettings({ workspace, fileTypes }) {
+export default function DocumentSettings({
+  workspace,
+  fileTypes,
+  systemSettings,
+}) {
   const [highlightWorkspace, setHighlightWorkspace] = useState(false);
   const [availableDocs, setAvailableDocs] = useState([]);
   const [loading, setLoading] = useState(true);
@@ -135,8 +139,15 @@ export default function DocumentSettings({ workspace, fileTypes }) {
       }
     });
 
-    const dollarAmount = (totalTokenCount / 1000) * COST_PER_TOKEN;
-    setEmbeddingsCost(dollarAmount);
+    // Do not do cost estimation unless the embedding engine is OpenAi.
+    if (
+      !systemSettings?.EmbeddingEngine ||
+      systemSettings.EmbeddingEngine === "openai"
+    ) {
+      const dollarAmount = (totalTokenCount / 1000) * COST_PER_TOKEN;
+      setEmbeddingsCost(dollarAmount);
+    }
+
     setMovedItems([...movedItems, ...newMovedItems]);
 
     let newAvailableDocs = JSON.parse(JSON.stringify(availableDocs));
diff --git a/frontend/src/components/Modals/MangeWorkspace/index.jsx b/frontend/src/components/Modals/MangeWorkspace/index.jsx
index d38cc35301f456a24b45aedd68b701899ec6b6ed..946e96d265d7e7f14c15ccfa67da7763e9708d83 100644
--- a/frontend/src/components/Modals/MangeWorkspace/index.jsx
+++ b/frontend/src/components/Modals/MangeWorkspace/index.jsx
@@ -15,11 +15,14 @@ const ManageWorkspace = ({ hideModal = noop, providedSlug = null }) => {
   const [selectedTab, setSelectedTab] = useState("documents");
   const [workspace, setWorkspace] = useState(null);
   const [fileTypes, setFileTypes] = useState(null);
+  const [settings, setSettings] = useState({});
 
   useEffect(() => {
     async function checkSupportedFiletypes() {
       const acceptedTypes = await System.acceptedDocumentTypes();
+      const _settings = await System.keys();
       setFileTypes(acceptedTypes ?? {});
+      setSettings(_settings ?? {});
     }
     checkSupportedFiletypes();
   }, []);
@@ -104,7 +107,11 @@ const ManageWorkspace = ({ hideModal = noop, providedSlug = null }) => {
           </div>
           <Suspense fallback={<div>Loading...</div>}>
             <div className={selectedTab === "documents" ? "" : "hidden"}>
-              <DocumentSettings workspace={workspace} fileTypes={fileTypes} />
+              <DocumentSettings
+                workspace={workspace}
+                fileTypes={fileTypes}
+                systemSettings={settings}
+              />
             </div>
             <div className={selectedTab === "settings" ? "" : "hidden"}>
               <WorkspaceSettings workspace={workspace} fileTypes={fileTypes} />
diff --git a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
index dd5b143393d4d18001763b22ad7a1a17d0595095..c81b0e527e31e0f3fa159014687939f56775631b 100644
--- a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
@@ -7,6 +7,7 @@ import System from "../../../models/system";
 import showToast from "../../../utils/toast";
 import OpenAiLogo from "../../../media/llmprovider/openai.png";
 import AzureOpenAiLogo from "../../../media/llmprovider/azure.png";
+import LocalAiLogo from "../../../media/llmprovider/localai.png";
 import PreLoader from "../../../components/Preloader";
 import LLMProviderOption from "../../../components/LLMSelection/LLMProviderOption";
 
@@ -16,6 +17,8 @@ export default function GeneralEmbeddingPreference() {
   const [embeddingChoice, setEmbeddingChoice] = useState("openai");
   const [settings, setSettings] = useState(null);
   const [loading, setLoading] = useState(true);
+  const [basePathValue, setBasePathValue] = useState("");
+  const [basePath, setBasePath] = useState("");
 
   const handleSubmit = async (e) => {
     e.preventDefault();
@@ -38,11 +41,17 @@ export default function GeneralEmbeddingPreference() {
     setHasChanges(true);
   };
 
+  function updateBasePath() {
+    setBasePath(basePathValue);
+  }
+
   useEffect(() => {
     async function fetchKeys() {
       const _settings = await System.keys();
       setSettings(_settings);
       setEmbeddingChoice(_settings?.EmbeddingEngine || "openai");
+      setBasePath(_settings?.EmbeddingBasePath || "");
+      setBasePathValue(_settings?.EmbeddingBasePath || "");
       setLoading(false);
     }
     fetchKeys();
@@ -136,6 +145,15 @@ export default function GeneralEmbeddingPreference() {
                       image={AzureOpenAiLogo}
                       onClick={updateChoice}
                     />
+                    <LLMProviderOption
+                      name="LocalAI"
+                      value="localai"
+                      link="localai.io"
+                      description="Self hosted LocalAI embedding engine."
+                      checked={embeddingChoice === "localai"}
+                      image={LocalAiLogo}
+                      onClick={updateChoice}
+                    />
                   </div>
                   <div className="mt-10 flex flex-wrap gap-4 max-w-[800px]">
                     {embeddingChoice === "openai" && (
@@ -215,6 +233,32 @@ export default function GeneralEmbeddingPreference() {
                         </div>
                       </>
                     )}
+
+                    {embeddingChoice === "localai" && (
+                      <>
+                        <div className="flex flex-col w-60">
+                          <label className="text-white text-sm font-semibold block mb-4">
+                            LocalAI Base URL
+                          </label>
+                          <input
+                            type="url"
+                            name="EmbeddingBasePath"
+                            className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
+                            placeholder="http://localhost:8080/v1"
+                            defaultValue={settings?.EmbeddingBasePath}
+                            onChange={(e) => setBasePathValue(e.target.value)}
+                            onBlur={updateBasePath}
+                            required={true}
+                            autoComplete="off"
+                            spellCheck={false}
+                          />
+                        </div>
+                        <LocalAIModelSelection
+                          settings={settings}
+                          basePath={basePath}
+                        />
+                      </>
+                    )}
                   </div>
                 </>
               )}
@@ -225,3 +269,73 @@ export default function GeneralEmbeddingPreference() {
     </div>
   );
 }
+
+function LocalAIModelSelection({ settings, basePath = null }) {
+  const [customModels, setCustomModels] = useState([]);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    async function findCustomModels() {
+      if (!basePath || !basePath.includes("/v1")) {
+        setCustomModels([]);
+        setLoading(false);
+        return;
+      }
+      setLoading(true);
+      const { models } = await System.customModels("localai", null, basePath);
+      setCustomModels(models || []);
+      setLoading(false);
+    }
+    findCustomModels();
+  }, [basePath]);
+
+  if (loading || customModels.length === 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Embedding Model Name
+        </label>
+        <select
+          name="EmbeddingModelPref"
+          disabled={true}
+          className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            {basePath?.includes("/v1")
+              ? "-- loading available models --"
+              : "-- waiting for URL --"}
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-4">
+        Embedding Model Name
+      </label>
+      <select
+        name="EmbeddingModelPref"
+        required={true}
+        className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {customModels.length > 0 && (
+          <optgroup label="Your loaded models">
+            {customModels.map((model) => {
+              return (
+                <option
+                  key={model.id}
+                  value={model.id}
+                  selected={settings?.EmbeddingModelPref === model.id}
+                >
+                  {model.id}
+                </option>
+              );
+            })}
+          </optgroup>
+        )}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/EmbeddingSelection/index.jsx b/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/EmbeddingSelection/index.jsx
index 4edde9be0431b2c69d2b1db192bcfbd8d6701caf..109e4115f0c58102ac9df473c0eaad121bfa0c0b 100644
--- a/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/EmbeddingSelection/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/EmbeddingSelection/index.jsx
@@ -1,6 +1,7 @@
 import React, { memo, useEffect, useState } from "react";
 import OpenAiLogo from "../../../../../media/llmprovider/openai.png";
 import AzureOpenAiLogo from "../../../../../media/llmprovider/azure.png";
+import LocalAiLogo from "../../../../../media/llmprovider/localai.png";
 import System from "../../../../../models/system";
 import PreLoader from "../../../../../components/Preloader";
 import LLMProviderOption from "../../../../../components/LLMSelection/LLMProviderOption";
@@ -9,16 +10,23 @@ function EmbeddingSelection({ nextStep, prevStep, currentStep }) {
   const [embeddingChoice, setEmbeddingChoice] = useState("openai");
   const [settings, setSettings] = useState(null);
   const [loading, setLoading] = useState(true);
+  const [basePathValue, setBasePathValue] = useState("");
+  const [basePath, setBasePath] = useState("");
 
   const updateChoice = (selection) => {
     setEmbeddingChoice(selection);
   };
 
+  function updateBasePath() {
+    setBasePath(basePathValue);
+  }
+
   useEffect(() => {
     async function fetchKeys() {
       const _settings = await System.keys();
       setSettings(_settings);
       setEmbeddingChoice(_settings?.EmbeddingEngine || "openai");
+      setBasePathValue(_settings?.EmbeddingBasePath || ""); setBasePath(_settings?.EmbeddingBasePath || "");
       setLoading(false);
     }
     fetchKeys();
@@ -77,6 +85,15 @@ function EmbeddingSelection({ nextStep, prevStep, currentStep }) {
               image={AzureOpenAiLogo}
               onClick={updateChoice}
             />
+            <LLMProviderOption
+              name="LocalAI"
+              value="localai"
+              link="localai.io"
+              description="Self hosted LocalAI embedding engine."
+              checked={embeddingChoice === "localai"}
+              image={LocalAiLogo}
+              onClick={updateChoice}
+            />
           </div>
           <div className="mt-10 flex flex-wrap gap-4 max-w-[800px]">
             {embeddingChoice === "openai" && (
@@ -152,6 +169,32 @@ function EmbeddingSelection({ nextStep, prevStep, currentStep }) {
                 </div>
               </>
             )}
+
+            {embeddingChoice === "localai" && (
+              <>
+                <div className="flex flex-col w-60">
+                  <label className="text-white text-sm font-semibold block mb-4">
+                    LocalAI Base URL
+                  </label>
+                  <input
+                    type="url"
+                    name="EmbeddingBasePath"
+                    className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
+                    placeholder="http://localhost:8080/v1"
+                    defaultValue={settings?.EmbeddingBasePath}
+                    onChange={(e) => setBasePathValue(e.target.value)}
+                    onBlur={updateBasePath}
+                    required={true}
+                    autoComplete="off"
+                    spellCheck={false}
+                  />
+                </div>
+                <LocalAIModelSelection
+                  settings={settings}
+                  basePath={basePath}
+                />
+              </>
+            )}
           </div>
         </div>
         <div className="flex w-full justify-between items-center p-6 space-x-2 border-t rounded-b border-gray-500/50">
@@ -174,4 +217,74 @@ function EmbeddingSelection({ nextStep, prevStep, currentStep }) {
   );
 }
 
+function LocalAIModelSelection({ settings, basePath = null }) {
+  const [customModels, setCustomModels] = useState([]);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    async function findCustomModels() {
+      if (!basePath || !basePath.includes("/v1")) {
+        setCustomModels([]);
+        setLoading(false);
+        return;
+      }
+      setLoading(true);
+      const { models } = await System.customModels("localai", null, basePath);
+      setCustomModels(models || []);
+      setLoading(false);
+    }
+    findCustomModels();
+  }, [basePath]);
+
+  if (loading || customModels.length === 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Embedding Model Name
+        </label>
+        <select
+          name="EmbeddingModelPref"
+          disabled={true}
+          className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            {basePath?.includes("/v1")
+              ? "-- loading available models --"
+              : "-- waiting for URL --"}
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-4">
+        Embedding Model Name
+      </label>
+      <select
+        name="EmbeddingModelPref"
+        required={true}
+        className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {customModels.length > 0 && (
+          <optgroup label="Your loaded models">
+            {customModels.map((model) => {
+              return (
+                <option
+                  key={model.id}
+                  value={model.id}
+                  selected={settings?.EmbeddingModelPref === model.id}
+                >
+                  {model.id}
+                </option>
+              );
+            })}
+          </optgroup>
+        )}
+      </select>
+    </div>
+  );
+}
+
 export default memo(EmbeddingSelection);
diff --git a/server/.env.example b/server/.env.example
index 127c0052622523e4230a825ba68240af75edde9b..03d1eb9bcefc0f3e5079bb4c564b1314dd8ddf2d 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -35,6 +35,15 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
 # EMBEDDING_ENGINE='openai'
 # OPEN_AI_KEY=sk-xxxx
 
+# EMBEDDING_ENGINE='azure'
+# AZURE_OPENAI_ENDPOINT=
+# AZURE_OPENAI_KEY=
+# EMBEDDING_MODEL_PREF='my-embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
+
+# EMBEDDING_ENGINE='localai'
+# EMBEDDING_BASE_PATH='http://localhost:8080/v1'
+# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
+
 ###########################################
 ######## Vector Database Selection ########
 ###########################################
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index 92ffea36f745638d3b8195641fdf195b80bc36f4..b4bedc7178dfd24499f80b764651d76738497a03 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -25,6 +25,8 @@ const SystemSettings = {
       MultiUserMode: await this.isMultiUserMode(),
       VectorDB: vectorDB,
       EmbeddingEngine: process.env.EMBEDDING_ENGINE,
+      EmbeddingBasePath: process.env.EMBEDDING_BASE_PATH,
+      EmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
       ...(vectorDB === "pinecone"
         ? {
             PineConeEnvironment: process.env.PINECONE_ENVIRONMENT,
diff --git a/server/utils/EmbeddingEngines/localAi/index.js b/server/utils/EmbeddingEngines/localAi/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..d4b37d3997b9d122d147546317f448155026ec2e
--- /dev/null
+++ b/server/utils/EmbeddingEngines/localAi/index.js
@@ -0,0 +1,77 @@
+const { toChunks } = require("../../helpers");
+
+class LocalAiEmbedder {
+  constructor() {
+    const { Configuration, OpenAIApi } = require("openai");
+    if (!process.env.EMBEDDING_BASE_PATH)
+      throw new Error("No embedding base path was set.");
+    if (!process.env.EMBEDDING_MODEL_PREF)
+      throw new Error("No embedding model was set.");
+    const config = new Configuration({
+      basePath: process.env.EMBEDDING_BASE_PATH,
+    });
+    this.openai = new OpenAIApi(config);
+
+    // Arbitrary limit to ensure we stay within reasonable POST request size.
+    this.embeddingChunkLimit = 1_000;
+  }
+
+  async embedTextInput(textInput) {
+    const result = await this.embedChunks(textInput);
+    return result?.[0] || [];
+  }
+
+  async embedChunks(textChunks = []) {
+    const embeddingRequests = [];
+    for (const chunk of toChunks(textChunks, this.embeddingChunkLimit)) {
+      embeddingRequests.push(
+        new Promise((resolve) => {
+          this.openai
+            .createEmbedding({
+              model: process.env.EMBEDDING_MODEL_PREF,
+              input: chunk,
+            })
+            .then((res) => {
+              resolve({ data: res.data?.data, error: null });
+            })
+            .catch((e) => {
+              resolve({ data: [], error: e?.error });
+            });
+        })
+      );
+    }
+
+    const { data = [], error = null } = await Promise.all(
+      embeddingRequests
+    ).then((results) => {
+      // If any errors were returned from LocalAI abort the entire sequence because the embeddings
+      // will be incomplete.
+      const errors = results
+        .filter((res) => !!res.error)
+        .map((res) => res.error)
+        .flat();
+      if (errors.length > 0) {
+        return {
+          data: [],
+          error: `(${errors.length}) Embedding Errors! ${errors
+            .map((error) => `[${error.type}]: ${error.message}`)
+            .join(", ")}`,
+        };
+      }
+      return {
+        data: results.map((res) => res?.data || []).flat(),
+        error: null,
+      };
+    });
+
+    if (!!error) throw new Error(`LocalAI Failed to embed: ${error}`);
+    return data.length > 0 &&
+      data.every((embd) => embd.hasOwnProperty("embedding"))
+      ? data.map((embd) => embd.embedding)
+      : null;
+  }
+}
+
+module.exports = {
+  LocalAiEmbedder,
+};
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index bbc69a0e82fb48e45e3f25507de2a5fddbb10c65..c7c61822141e0eda615e860d2b35eba129c5b9b1 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -59,6 +59,9 @@ function getEmbeddingEngineSelection() {
         AzureOpenAiEmbedder,
       } = require("../EmbeddingEngines/azureOpenAi");
       return new AzureOpenAiEmbedder();
+    case "localai":
+      const { LocalAiEmbedder } = require("../EmbeddingEngines/localAi");
+      return new LocalAiEmbedder();
     default:
       return null;
   }
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 8290f7eb4b7edefa7d93eade7c81332174c73f86..b7ecffa140613455d04eff8bb9291bba8fd94503 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -72,6 +72,14 @@ const KEY_MAPPING = {
     envKey: "EMBEDDING_ENGINE",
     checks: [supportedEmbeddingModel],
   },
+  EmbeddingBasePath: {
+    envKey: "EMBEDDING_BASE_PATH",
+    checks: [isNotEmpty, validLLMExternalBasePath],
+  },
+  EmbeddingModelPref: {
+    envKey: "EMBEDDING_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
 
   // Vector Database Selection Settings
   VectorDB: {
@@ -191,7 +199,7 @@ function validAnthropicModel(input = "") {
 }
 
 function supportedEmbeddingModel(input = "") {
-  const supported = ["openai", "azure"];
+  const supported = ["openai", "azure", "localai"];
   return supported.includes(input)
     ? null
     : `Invalid Embedding model type. Must be one of ${supported.join(", ")}.`;