diff --git a/README.md b/README.md
index 79564201d45f9bb6fbaf5b40b4c0229e07640c60..842efa07188796afd59323478ecf02c1875403a8 100644
--- a/README.md
+++ b/README.md
@@ -85,7 +85,7 @@ Some cool features of AnythingLLM
 - [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
 - [LocalAi (all)](https://localai.io/)
 - [Ollama (all)](https://ollama.ai/)
-<!-- - [LM Studio (all)](https://lmstudio.ai) -->
+- [LM Studio (all)](https://lmstudio.ai)
 
 **Supported Transcription models:**
 
diff --git a/docker/.env.example b/docker/.env.example
index 32f2a55d48d7a3dd4427424deee8d141652cc632..aabc139f8cde49d4016edaf225fbea15559cd366 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -85,10 +85,17 @@ GID='1000'
 # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be
 
 # EMBEDDING_ENGINE='ollama'
-# EMBEDDING_BASE_PATH='http://127.0.0.1:11434'
+# EMBEDDING_BASE_PATH='http://host.docker.internal:11434'
 # EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
 # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
 
+# EMBEDDING_ENGINE='lmstudio'
+# EMBEDDING_BASE_PATH='http://host.docker.internal:1234/v1'
+# EMBEDDING_MODEL_PREF='nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.Q4_0.gguf'
+# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
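+# Note: host.docker.internal typically resolves to the machine hosting the
+# Docker container, which is where the LM Studio server is usually listening.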
+
 ###########################################
 ######## Vector Database Selection ########
 ###########################################
diff --git a/frontend/src/components/EmbeddingSelection/LMStudioOptions/index.jsx b/frontend/src/components/EmbeddingSelection/LMStudioOptions/index.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..1192ce675fed914e381c1cedda05b6b53d0a6e72
--- /dev/null
+++ b/frontend/src/components/EmbeddingSelection/LMStudioOptions/index.jsx
@@ -0,0 +1,122 @@
+import React, { useEffect, useState } from "react";
+import System from "@/models/system";
+
+export default function LMStudioEmbeddingOptions({ settings }) {
+  const [basePathValue, setBasePathValue] = useState(
+    settings?.EmbeddingBasePath
+  );
+  const [basePath, setBasePath] = useState(settings?.EmbeddingBasePath);
+
+  return (
+    <div className="w-full flex flex-col gap-y-4">
+      <div className="w-full flex items-center gap-4">
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-4">
+            LMStudio Base URL
+          </label>
+          <input
+            type="url"
+            name="EmbeddingBasePath"
+            className="bg-zinc-900 text-white placeholder-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+            placeholder="http://localhost:1234/v1"
+            defaultValue={settings?.EmbeddingBasePath}
+            onChange={(e) => setBasePathValue(e.target.value)}
+            onBlur={() => setBasePath(basePathValue)}
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+          />
+        </div>
+        <LMStudioModelSelection settings={settings} basePath={basePath} />
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-4">
+            Max embedding chunk length
+          </label>
+          <input
+            type="number"
+            name="EmbeddingModelMaxChunkLength"
+            className="bg-zinc-900 text-white placeholder-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+            placeholder="8192"
+            min={1}
+            onScroll={(e) => e.target.blur()}
+            defaultValue={settings?.EmbeddingModelMaxChunkLength}
+            required={false}
+            autoComplete="off"
+          />
+        </div>
+      </div>
+    </div>
+  );
+}
+
+function LMStudioModelSelection({ settings, basePath = null }) {
+  const [customModels, setCustomModels] = useState([]);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    async function findCustomModels() {
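+      // LM Studio serves its OpenAI-compatible API under the /v1 prefix, so
+      // wait until the URL includes it before asking for available models.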
+      if (!basePath || !basePath.includes("/v1")) {
+        setCustomModels([]);
+        setLoading(false);
+        return;
+      }
+      setLoading(true);
+      const { models } = await System.customModels("lmstudio", null, basePath);
+      setCustomModels(models || []);
+      setLoading(false);
+    }
+    findCustomModels();
+  }, [basePath]);
+
+  if (loading || customModels.length === 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Embedding Model Selection
+        </label>
+        <select
+          name="EmbeddingModelPref"
+          disabled={true}
+          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            {basePath?.includes("/v1")
+              ? "-- loading available models --"
+              : "-- waiting for URL --"}
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-4">
+        Embedding Model Selection
+      </label>
+      <select
+        name="EmbeddingModelPref"
+        required={true}
+        className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {customModels.length > 0 && (
+          <optgroup label="Your loaded models">
+            {customModels.map((model) => {
+              return (
+                <option
+                  key={model.id}
+                  value={model.id}
+                  selected={settings.EmbeddingModelPref === model.id}
+                >
+                  {model.id}
+                </option>
+              );
+            })}
+          </optgroup>
+        )}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
index b3ecab4b1738dc9cd7952d1f96e6af29a03986f8..25dcd62d3ed1b42c865639a463bbde4aa9a83b58 100644
--- a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx
@@ -8,6 +8,7 @@ import OpenAiLogo from "@/media/llmprovider/openai.png";
 import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
 import OllamaLogo from "@/media/llmprovider/ollama.png";
+import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import PreLoader from "@/components/Preloader";
 import ChangeWarningModal from "@/components/ChangeWarning";
 import OpenAiOptions from "@/components/EmbeddingSelection/OpenAiOptions";
@@ -15,6 +16,7 @@ import AzureAiOptions from "@/components/EmbeddingSelection/AzureAiOptions";
 import LocalAiOptions from "@/components/EmbeddingSelection/LocalAiOptions";
 import NativeEmbeddingOptions from "@/components/EmbeddingSelection/NativeEmbeddingOptions";
 import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOptions";
+import LMStudioEmbeddingOptions from "@/components/EmbeddingSelection/LMStudioOptions";
 import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
 import { useModal } from "@/hooks/useModal";
@@ -58,6 +60,14 @@ const EMBEDDERS = [
     options: (settings) => <OllamaEmbeddingOptions settings={settings} />,
     description: "Run embedding models locally on your own machine.",
   },
+  {
+    name: "LM Studio",
+    value: "lmstudio",
+    logo: LMStudioLogo,
+    options: (settings) => <LMStudioEmbeddingOptions settings={settings} />,
+    description:
+      "Discover, download, and run thousands of cutting edge LLMs in a few clicks.",
+  },
 ];
 
 export default function GeneralEmbeddingPreference() {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index bd8487842db00224b20abfa049fb8039f05aeaee..11612b99b0807d025d0ed8f8395f903a7d994a79 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -237,6 +237,13 @@ export const EMBEDDING_ENGINE_PRIVACY = {
     ],
     logo: OllamaLogo,
   },
+  lmstudio: {
+    name: "LMStudio",
+    description: [
+      "Your document text is embedded privately on the server running LMStudio",
+    ],
+    logo: LMStudioLogo,
+  },
 };
 
 export default function DataHandling({ setHeader, setForwardBtn, setBackBtn }) {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/EmbeddingPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/EmbeddingPreference/index.jsx
index 1932309e43f514a38b5062fb59ba090c3ee4b588..fc44a68de96633ad006c123371d76f690ba3677a 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/EmbeddingPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/EmbeddingPreference/index.jsx
@@ -5,11 +5,13 @@ import OpenAiLogo from "@/media/llmprovider/openai.png";
 import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
 import OllamaLogo from "@/media/llmprovider/ollama.png";
+import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import NativeEmbeddingOptions from "@/components/EmbeddingSelection/NativeEmbeddingOptions";
 import OpenAiOptions from "@/components/EmbeddingSelection/OpenAiOptions";
 import AzureAiOptions from "@/components/EmbeddingSelection/AzureAiOptions";
 import LocalAiOptions from "@/components/EmbeddingSelection/LocalAiOptions";
 import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOptions";
+import LMStudioEmbeddingOptions from "@/components/EmbeddingSelection/LMStudioOptions";
 import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem";
 import System from "@/models/system";
 import paths from "@/utils/paths";
@@ -19,6 +21,52 @@ import { useNavigate } from "react-router-dom";
 const TITLE = "Embedding Preference";
 const DESCRIPTION =
   "AnythingLLM can work with many embedding models. This will be the model which turns documents into vectors.";
+const EMBEDDERS = [
+  {
+    name: "AnythingLLM Embedder",
+    value: "native",
+    logo: AnythingLLMIcon,
+    options: (settings) => <NativeEmbeddingOptions settings={settings} />,
+    description:
+      "Use the built-in embedding engine for AnythingLLM. Zero setup!",
+  },
+  {
+    name: "OpenAI",
+    value: "openai",
+    logo: OpenAiLogo,
+    options: (settings) => <OpenAiOptions settings={settings} />,
+    description: "The standard option for most non-commercial use.",
+  },
+  {
+    name: "Azure OpenAI",
+    value: "azure",
+    logo: AzureOpenAiLogo,
+    options: (settings) => <AzureAiOptions settings={settings} />,
+    description: "The enterprise option of OpenAI hosted on Azure services.",
+  },
+  {
+    name: "Local AI",
+    value: "localai",
+    logo: LocalAiLogo,
+    options: (settings) => <LocalAiOptions settings={settings} />,
+    description: "Run embedding models locally on your own machine.",
+  },
+  {
+    name: "Ollama",
+    value: "ollama",
+    logo: OllamaLogo,
+    options: (settings) => <OllamaEmbeddingOptions settings={settings} />,
+    description: "Run embedding models locally on your own machine.",
+  },
+  {
+    name: "LM Studio",
+    value: "lmstudio",
+    logo: LMStudioLogo,
+    options: (settings) => <LMStudioEmbeddingOptions settings={settings} />,
+    description:
+      "Discover, download, and run thousands of cutting edge LLMs in a few clicks.",
+  },
+];
 
 export default function EmbeddingPreference({
   setHeader,
@@ -42,45 +90,6 @@ export default function EmbeddingPreference({
     fetchKeys();
   }, []);
 
-  const EMBEDDERS = [
-    {
-      name: "AnythingLLM Embedder",
-      value: "native",
-      logo: AnythingLLMIcon,
-      options: <NativeEmbeddingOptions settings={settings} />,
-      description:
-        "Use the built-in embedding engine for AnythingLLM. Zero setup!",
-    },
-    {
-      name: "OpenAI",
-      value: "openai",
-      logo: OpenAiLogo,
-      options: <OpenAiOptions settings={settings} />,
-      description: "The standard option for most non-commercial use.",
-    },
-    {
-      name: "Azure OpenAI",
-      value: "azure",
-      logo: AzureOpenAiLogo,
-      options: <AzureAiOptions settings={settings} />,
-      description: "The enterprise option of OpenAI hosted on Azure services.",
-    },
-    {
-      name: "Local AI",
-      value: "localai",
-      logo: LocalAiLogo,
-      options: <LocalAiOptions settings={settings} />,
-      description: "Run embedding models locally on your own machine.",
-    },
-    {
-      name: "Ollama",
-      value: "ollama",
-      logo: OllamaLogo,
-      options: <OllamaEmbeddingOptions settings={settings} />,
-      description: "Run embedding models locally on your own machine.",
-    },
-  ];
-
   function handleForward() {
     if (hiddenSubmitButtonRef.current) {
       hiddenSubmitButtonRef.current.click();
@@ -161,8 +170,9 @@ export default function EmbeddingPreference({
         </div>
         <div className="mt-4 flex flex-col gap-y-1">
           {selectedEmbedder &&
-            EMBEDDERS.find((embedder) => embedder.value === selectedEmbedder)
-              ?.options}
+            EMBEDDERS.find(
+              (embedder) => embedder.value === selectedEmbedder
+            )?.options(settings)}
         </div>
         <button
           type="submit"
diff --git a/server/.env.example b/server/.env.example
index 47cda159e7cb090312829946197a4787db579f12..131dcf895e0da1073166bf26be32954cda4f5070 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -86,6 +86,11 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
 # EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
 # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
 
+# EMBEDDING_ENGINE='lmstudio'
+# EMBEDDING_BASE_PATH='http://localhost:1234/v1'
+# EMBEDDING_MODEL_PREF='nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.Q4_0.gguf'
+# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
+
 ###########################################
 ######## Vector Database Selection ########
 ###########################################
diff --git a/server/utils/EmbeddingEngines/lmstudio/index.js b/server/utils/EmbeddingEngines/lmstudio/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..b19ea262224113d73eb0f554b30233193c3c2bbc
--- /dev/null
+++ b/server/utils/EmbeddingEngines/lmstudio/index.js
@@ -0,0 +1,117 @@
+const { maximumChunkLength } = require("../../helpers");
+
+class LMStudioEmbedder {
+  constructor() {
+    if (!process.env.EMBEDDING_BASE_PATH)
+      throw new Error("No embedding base path was set.");
+    if (!process.env.EMBEDDING_MODEL_PREF)
+      throw new Error("No embedding model was set.");
+    this.basePath = `${process.env.EMBEDDING_BASE_PATH}/embeddings`;
+    this.model = process.env.EMBEDDING_MODEL_PREF;
+
+    // Limit of how many strings we can process in a single pass to stay within resource or network limits
+    this.maxConcurrentChunks = 1;
+    this.embeddingMaxChunkLength = maximumChunkLength();
+  }
+
+  log(text, ...args) {
+    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+  }
+
+  async #isAlive() {
+    // this.basePath points at the /embeddings endpoint, so probe the API
+    // root's /models endpoint for the health check instead.
+    return await fetch(`${process.env.EMBEDDING_BASE_PATH}/models`, {
+      method: "HEAD",
+    })
+      .then((res) => res.ok)
+      .catch((e) => {
+        this.log(e.message);
+        return false;
+      });
+  }
+
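+  // Embed a single string. embedChunks expects an array of chunks, so wrap
+  // the input and return the first (and only) vector that comes back.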
+  async embedTextInput(textInput) {
+    const result = await this.embedChunks([textInput]);
+    return result?.[0] || [];
+  }
+
+  async embedChunks(textChunks = []) {
+    if (!(await this.#isAlive()))
+      throw new Error(
+        `LMStudio service could not be reached. Is LMStudio running?`
+      );
+
+    this.log(
+      `Embedding ${textChunks.length} chunks of text with ${this.model}.`
+    );
+
+    // LMStudio appears to drop queued embedding requests when several are
+    // in-flight at once: only the first resolves and the rest go unanswered.
+    // Run the requests sequentially so every chunk gets a response.
+    let results = [];
+    let hasError = false;
+    for (const chunk of textChunks) {
+      if (hasError) break; // If an error occurred don't continue and exit early.
+      results.push(
+        await fetch(this.basePath, {
+          method: "POST",
+          headers: {
+            "Content-Type": "application/json",
+          },
+          body: JSON.stringify({
+            model: this.model,
+            input: chunk,
+          }),
+        })
+          .then((res) => res.json())
+          .then((json) => {
+            const embedding = json?.data?.[0]?.embedding;
+            if (!Array.isArray(embedding) || !embedding.length)
+              throw {
+                type: "EMPTY_ARR",
+                message: "The embedding was empty from LMStudio",
+              };
+            return { data: embedding, error: null };
+          })
+          .catch((error) => {
+            hasError = true;
+            return { data: [], error };
+          })
+      );
+    }
+
+    // Accumulate errors from embedding.
+    // If any are present throw an abort error.
+    const errors = results
+      .filter((res) => !!res.error)
+      .map((res) => res.error);
+
+    if (errors.length > 0) {
+      const uniqueErrors = new Set(
+        errors.map((error) => `[${error.type}]: ${error.message}`)
+      );
+      throw new Error(
+        `LMStudio failed to embed: ${Array.from(uniqueErrors).join(", ")}`
+      );
+    }
+
+    const data = results.map((res) => res?.data || []);
+    return data.length > 0 ? data : null;
+  }
+}
+
+module.exports = {
+  LMStudioEmbedder,
+};
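+
+// Minimal usage sketch (illustrative; assumes EMBEDDING_BASE_PATH and
+// EMBEDDING_MODEL_PREF are set as in server/.env.example, with an LM Studio
+// server running and an embedding model loaded):
+//
+//   const { LMStudioEmbedder } = require("./utils/EmbeddingEngines/lmstudio");
+//   const embedder = new LMStudioEmbedder();
+//   const vectors = await embedder.embedChunks(["first", "second"]);
+//   // -> one embedding array per input chunk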
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 3d8bb915f352602be3fe6734477c5ec991c7f0f0..837a28f60a7ee1551ccc5c42a395c7ced60df865 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -102,6 +102,9 @@ function getEmbeddingEngineSelection() {
     case "native":
       const { NativeEmbedder } = require("../EmbeddingEngines/native");
       return new NativeEmbedder();
+    case "lmstudio":
+      const { LMStudioEmbedder } = require("../EmbeddingEngines/lmstudio");
+      return new LMStudioEmbedder();
     default:
       return null;
   }
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 17a4172a4d11ad16d769423c709327c8e4dbc915..ee9d4effabe4a5bdb1d1f4c2ede2d4c55195b80d 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -408,7 +408,14 @@ function validAnthropicModel(input = "") {
 }
 
 function supportedEmbeddingModel(input = "") {
-  const supported = ["openai", "azure", "localai", "native", "ollama"];
+  const supported = [
+    "openai",
+    "azure",
+    "localai",
+    "native",
+    "ollama",
+    "lmstudio",
+  ];
   return supported.includes(input)
     ? null
     : `Invalid Embedding model type. Must be one of ${supported.join(", ")}.`;