diff --git a/README.md b/README.md
index 032b589299b6cef649c0926057c14d1136947469..00301a3686460e9a5c375c3faafdb46e4b7ef97b 100644
--- a/README.md
+++ b/README.md
@@ -52,9 +52,10 @@ Some cool features of AnythingLLM
 
 ### Supported LLMs and Vector Databases
 **Supported LLMs:**
-- OpenAI
-- Azure OpenAI
-- Anthropic ClaudeV2
+- [OpenAI](https://openai.com)
+- [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
+- [Anthropic ClaudeV2](https://www.anthropic.com/)
+- [LM Studio (all models)](https://lmstudio.ai)
 
 **Supported Vector Databases:**
 - [LanceDB](https://github.com/lancedb/lancedb) (default)
@@ -73,7 +74,7 @@ This monorepo consists of three main sections:
 ### Requirements
 - `yarn` and `node` on your machine
 - `python` 3.9+ for running scripts in `collector/`.
-- access to an LLM like `GPT-3.5`, `GPT-4`, etc.
+- access to an LLM service like `GPT-3.5`, `GPT-4`, `Mistral`, `Llama`, etc.
 - (optional) a vector database like Pinecone, qDrant, Weaviate, or Chroma*.
 *AnythingLLM by default uses a built-in vector db called LanceDB.
 
diff --git a/docker/.env.example b/docker/.env.example
index 4ab09a1e2e3ab3d71ce8f5c288dcc1bdd5f77baf..1bd2b70828bd0336393045440cc6c306acf65925 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -19,6 +19,10 @@ CACHE_VECTORS="true"
 # ANTHROPIC_API_KEY=sk-ant-xxxx
 # ANTHROPIC_MODEL_PREF='claude-2'
 
+# LLM_PROVIDER='lmstudio'
+# LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
+# LMSTUDIO_MODEL_TOKEN_LIMIT=4096
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
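
`LMSTUDIO_BASE_PATH` should point at LM Studio's OpenAI-compatible local server (the app's Local Server tab, port 1234 by default). As a quick sanity check before saving the value, you can hit the models endpoint; a minimal sketch, assuming Node 18+ for the global `fetch` and that the server exposes the OpenAI-style `GET /v1/models` route:

```js
// Hypothetical helper (not part of this PR) to verify an LMSTUDIO_BASE_PATH.
async function checkLMStudioBasePath(basePath = "http://localhost:1234/v1") {
  // Same normalization the provider applies below: strip trailing slashes.
  const url = `${basePath.replace(/\/+$/, "")}/models`;
  const res = await fetch(url);
  if (!res.ok) throw new Error(`LMStudio server responded ${res.status}`);
  const { data } = await res.json(); // OpenAI-style list: { data: [{ id, ... }] }
  console.log(`Reachable. ${data.length} model(s) available.`);
}

checkLMStudioBasePath(process.env.LMSTUDIO_BASE_PATH).catch(console.error);
```
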
diff --git a/frontend/src/components/LLMSelection/LMStudioOptions/index.jsx b/frontend/src/components/LLMSelection/LMStudioOptions/index.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..1f00c070d4c0e89f0d8c0125428db6657d2d50dc
--- /dev/null
+++ b/frontend/src/components/LLMSelection/LMStudioOptions/index.jsx
@@ -0,0 +1,59 @@
+import { Info } from "@phosphor-icons/react";
+import paths from "../../../utils/paths";
+
+export default function LMStudioOptions({ settings, showAlert = false }) {
+  return (
+    <div className="w-full flex flex-col">
+      {showAlert && (
+        <div className="flex flex-col md:flex-row md:items-center gap-x-2 text-white mb-6 bg-blue-800/30 w-fit rounded-lg px-4 py-2">
+          <div className="gap-x-2 flex items-center">
+            <Info size={12} className="hidden md:block" />
+            <p className="text-sm md:text-base">
+              Using LMStudio as your LLM requires you to set up an embedding
+              service.
+            </p>
+          </div>
+          <a
+            href={paths.general.embeddingPreference()}
+            className="text-sm md:text-base my-2 underline"
+          >
+            Manage embedding &rarr;
+          </a>
+        </div>
+      )}
+      <div className="w-full flex items-center gap-4">
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-4">
+            LMStudio Base URL
+          </label>
+          <input
+            type="url"
+            name="LMStudioBasePath"
+            className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
+            placeholder="http://localhost:1234/v1"
+            defaultValue={settings?.LMStudioBasePath}
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+          />
+        </div>
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-4">
+            Token context window
+          </label>
+          <input
+            type="number"
+            name="LMStudioTokenLimit"
+            className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
+            placeholder="4096"
+            min={1}
+            onWheel={(e) => e.target.blur()}
+            defaultValue={settings?.LMStudioTokenLimit}
+            required={true}
+            autoComplete="off"
+          />
+        </div>
+      </div>
+    </div>
+  );
+}
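
The two input `name` attributes above, `LMStudioBasePath` and `LMStudioTokenLimit`, are the exact keys the settings form posts and that `server/utils/helpers/updateENV.js` maps to env vars later in this diff. A hypothetical sketch of how a parent form collects them (the real submit handlers live in the settings pages, outside this component):

```jsx
// Hypothetical parent-form handler; field names flow through unchanged.
const handleSubmit = async (e) => {
  e.preventDefault();
  const data = {};
  new FormData(e.target).forEach((value, key) => (data[key] = value));
  // data.LMStudioBasePath and data.LMStudioTokenLimit now match the
  // KEY_MAPPING entries added in updateENV.js below.
  console.log(data);
};
```
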
diff --git a/frontend/src/media/llmprovider/lmstudio.png b/frontend/src/media/llmprovider/lmstudio.png
new file mode 100644
index 0000000000000000000000000000000000000000..a5dc75afb71539541e5fc6a9a54a088df72ab763
Binary files /dev/null and b/frontend/src/media/llmprovider/lmstudio.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index e933ab5ee413fae2ba960c675cbec9dd5989fc3f..f2883d05a959c14714a1f2ca83c74243b7fd92f4 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -8,11 +8,13 @@ import showToast from "../../../utils/toast";
 import OpenAiLogo from "../../../media/llmprovider/openai.png";
 import AzureOpenAiLogo from "../../../media/llmprovider/azure.png";
 import AnthropicLogo from "../../../media/llmprovider/anthropic.png";
+import LMStudioLogo from "../../../media/llmprovider/lmstudio.png";
 import PreLoader from "../../../components/Preloader";
 import LLMProviderOption from "../../../components/LLMSelection/LLMProviderOption";
 import OpenAiOptions from "../../../components/LLMSelection/OpenAiOptions";
 import AzureAiOptions from "../../../components/LLMSelection/AzureAiOptions";
 import AnthropicAiOptions from "../../../components/LLMSelection/AnthropicAiOptions";
+import LMStudioOptions from "../../../components/LLMSelection/LMStudioOptions";
 
 export default function GeneralLLMPreference() {
   const [saving, setSaving] = useState(false);
@@ -130,6 +132,15 @@ export default function GeneralLLMPreference() {
                   image={AnthropicLogo}
                   onClick={updateLLMChoice}
                 />
+                <LLMProviderOption
+                  name="LM Studio"
+                  value="lmstudio"
+                  link="lmstudio.ai"
+                  description="Discover, download, and run thousands of cutting-edge LLMs in a few clicks."
+                  checked={llmChoice === "lmstudio"}
+                  image={LMStudioLogo}
+                  onClick={updateLLMChoice}
+                />
               </div>
               <div className="mt-10 flex flex-wrap gap-4 max-w-[800px]">
                 {llmChoice === "openai" && (
@@ -141,6 +152,9 @@ export default function GeneralLLMPreference() {
                 {llmChoice === "anthropic" && (
                   <AnthropicAiOptions settings={settings} showAlert={true} />
                 )}
+                {llmChoice === "lmstudio" && (
+                  <LMStudioOptions settings={settings} showAlert={true} />
+                )}
               </div>
             </div>
           </form>
diff --git a/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/LLMSelection/index.jsx b/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/LLMSelection/index.jsx
index 3a19d38740acaaf29f97b01f722d73ac7cdf6f06..429a0a6614ec136ca07f5ad0863129c88ff705b6 100644
--- a/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/LLMSelection/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/LLMSelection/index.jsx
@@ -2,12 +2,14 @@ import React, { memo, useEffect, useState } from "react";
 import OpenAiLogo from "../../../../../media/llmprovider/openai.png";
 import AzureOpenAiLogo from "../../../../../media/llmprovider/azure.png";
 import AnthropicLogo from "../../../../../media/llmprovider/anthropic.png";
+import LMStudioLogo from "../../../../../media/llmprovider/lmstudio.png";
 import System from "../../../../../models/system";
 import PreLoader from "../../../../../components/Preloader";
 import LLMProviderOption from "../../../../../components/LLMSelection/LLMProviderOption";
 import OpenAiOptions from "../../../../../components/LLMSelection/OpenAiOptions";
 import AzureAiOptions from "../../../../../components/LLMSelection/AzureAiOptions";
 import AnthropicAiOptions from "../../../../../components/LLMSelection/AnthropicAiOptions";
+import LMStudioOptions from "../../../../../components/LLMSelection/LMStudioOptions";
 
 function LLMSelection({ nextStep, prevStep, currentStep }) {
   const [llmChoice, setLLMChoice] = useState("openai");
@@ -46,6 +48,8 @@ function LLMSelection({ nextStep, prevStep, currentStep }) {
     switch (data.LLMProvider) {
       case "anthropic":
         return nextStep("embedding_preferences");
+      case "lmstudio":
+        return nextStep("embedding_preferences");
       default:
         return nextStep("vector_database");
     }
@@ -94,6 +98,15 @@ function LLMSelection({ nextStep, prevStep, currentStep }) {
               image={AnthropicLogo}
               onClick={updateLLMChoice}
             />
+            <LLMProviderOption
+              name="LM Studio"
+              value="lmstudio"
+              link="lmstudio.ai"
+              description="Discover, download, and run thousands of cutting-edge LLMs in a few clicks."
+              checked={llmChoice === "lmstudio"}
+              image={LMStudioLogo}
+              onClick={updateLLMChoice}
+            />
           </div>
           <div className="mt-10 flex flex-wrap gap-4 max-w-[800px]">
             {llmChoice === "openai" && <OpenAiOptions settings={settings} />}
@@ -101,6 +114,9 @@ function LLMSelection({ nextStep, prevStep, currentStep }) {
             {llmChoice === "anthropic" && (
               <AnthropicAiOptions settings={settings} />
             )}
+            {llmChoice === "lmstudio" && (
+              <LMStudioOptions settings={settings} />
+            )}
           </div>
         </div>
         <div className="flex w-full justify-between items-center p-6 space-x-2 border-t rounded-b border-gray-500/50">
diff --git a/server/.env.example b/server/.env.example
index d7a9cbe76999faf2b324fc32f68fa28e100049cc..327aa6eee5a62246bd31a36d07300d6bf43f1b6b 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -19,6 +19,10 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
 # ANTHROPIC_API_KEY=sk-ant-xxxx
 # ANTHROPIC_MODEL_PREF='claude-2'
 
+# LLM_PROVIDER='lmstudio'
+# LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
+# LMSTUDIO_MODEL_TOKEN_LIMIT=4096
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
@@ -58,4 +62,4 @@ VECTOR_DB="lancedb"
 # CLOUD DEPLOYMENT VARIRABLES ONLY
 # AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.
 # STORAGE_DIR= # absolute filesystem path with no trailing slash
-# NO_DEBUG="true"
\ No newline at end of file
+# NO_DEBUG="true"
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index d15f73060b7c7d2732ab5909d0e2c89fa5dc0067..b28c5e865ff83710217b104c7bf55c8ebf94b11c 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -81,6 +81,19 @@ const SystemSettings = {
             AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
           }
         : {}),
+
+      ...(llmProvider === "lmstudio"
+        ? {
+            LMStudioBasePath: process.env.LMSTUDIO_BASE_PATH,
+            LMStudioTokenLimit: process.env.LMSTUDIO_MODEL_TOKEN_LIMIT,
+
+            // For embedding credentials when lmstudio is selected.
+            OpenAiKey: !!process.env.OPEN_AI_KEY,
+            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
+            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
+            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
+          }
+        : {}),
     };
   },
 
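With `LLM_PROVIDER='lmstudio'`, the settings payload sent to the frontend gains the keys below. An illustrative sketch of the resulting shape (example values; the two `*Key` fields are coerced with `!!` so raw secrets never leave the server):

```js
// Example shape only; values depend on the instance's env.
const lmstudioSettings = {
  LMStudioBasePath: "http://localhost:1234/v1",
  LMStudioTokenLimit: "4096",
  OpenAiKey: true,                // !!process.env.OPEN_AI_KEY
  AzureOpenAiEndpoint: undefined, // raw endpoint URL, if set
  AzureOpenAiKey: false,          // !!process.env.AZURE_OPENAI_KEY
  AzureOpenAiEmbeddingModelPref: undefined,
};
```
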
diff --git a/server/utils/AiProviders/lmStudio/index.js b/server/utils/AiProviders/lmStudio/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..bb025b3b1787d25393a2b0c495396723b8b16e8c
--- /dev/null
+++ b/server/utils/AiProviders/lmStudio/index.js
@@ -0,0 +1,139 @@
+const { chatPrompt } = require("../../chats");
+
+// LMStudio's inference server is OpenAI-compatible, so this is a hybrid of the
+// openAi LLM chat completion client pointed at an LMStudio instance.
+class LMStudioLLM {
+  constructor(embedder = null) {
+    if (!process.env.LMSTUDIO_BASE_PATH)
+      throw new Error("No LMStudio API Base Path was set.");
+
+    const { Configuration, OpenAIApi } = require("openai");
+    const config = new Configuration({
+      basePath: process.env.LMSTUDIO_BASE_PATH?.replace(/\/+$/, ""), // the URL of your LMStudio instance, trailing slashes stripped
+    });
+    this.lmstudio = new OpenAIApi(config);
+    // LMStudio's inference server does not require the model param, so we
+    // can stub it here.
+    this.model = "model-placeholder";
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    if (!embedder)
+      throw new Error(
+        "INVALID LM STUDIO SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LMStudio as your LLM."
+      );
+    this.embedder = embedder;
+  }
+
+  // Ensure the user set a numeric value for the token limit;
+  // if undefined, assume a 4096-token window.
+  promptWindowLimit() {
+    const limit = process.env.LMSTUDIO_MODEL_TOKEN_LIMIT || 4096;
+    if (!limit || isNaN(Number(limit)))
+      throw new Error("No LMStudio token context limit was set.");
+    return Number(limit);
+  }
+
+  async isValidChatCompletionModel(_ = "") {
+    // LMStudio will serve whatever model is currently loaded, so any name is accepted.
+    // See the comment on the this.model declaration in the constructor.
+    return true;
+  }
+
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}
+Context:
+    ${contextTexts
+      .map((text, i) => {
+        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+      })
+      .join("")}`,
+    };
+    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+  }
+
+  async isSafe(_input = "") {
+    // Not implemented so must be stubbed
+    return { safe: true, reasons: [] };
+  }
+
+  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+    if (!this.model)
+      throw new Error(
+        `LMStudio chat: ${this.model} is not valid or defined for chat completion!`
+      );
+
+    const textResponse = await this.lmstudio
+      .createChatCompletion({
+        model: this.model,
+        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        n: 1,
+        messages: await this.compressMessages(
+          {
+            systemPrompt: chatPrompt(workspace),
+            userPrompt: prompt,
+            chatHistory,
+          },
+          rawHistory
+        ),
+      })
+      .then((json) => {
+        const res = json.data;
+        if (!res.hasOwnProperty("choices"))
+          throw new Error("LMStudio chat: No results!");
+        if (res.choices.length === 0)
+          throw new Error("LMStudio chat: No results length!");
+        return res.choices[0].message.content;
+      })
+      .catch((error) => {
+        throw new Error(
+          `LMStudio::createChatCompletion failed with: ${error.message}`
+        );
+      });
+
+    return textResponse;
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 } = {}) {
+    if (!this.model)
+      throw new Error(
+        `LMStudio chat: ${this.model} is not a valid or defined model for chat completion!`
+      );
+
+    const { data } = await this.lmstudio.createChatCompletion({
+      model: this.model,
+      messages,
+      temperature,
+    });
+
+    if (!data.hasOwnProperty("choices")) return null;
+    return data.choices[0].message.content;
+  }
+
+  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+module.exports = {
+  LMStudioLLM,
+};
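
End to end, the class is constructed with an embedder and driven by the two env vars; a minimal usage sketch, run from `server/` and assuming an embedding engine is configured (the constructor throws otherwise) and that `getEmbeddingEngineSelection` is exported from `utils/helpers`:

```js
process.env.LMSTUDIO_BASE_PATH = "http://localhost:1234/v1";
process.env.LMSTUDIO_MODEL_TOKEN_LIMIT = "4096";

const { LMStudioLLM } = require("./utils/AiProviders/lmStudio");
const { getEmbeddingEngineSelection } = require("./utils/helpers");

const llm = new LMStudioLLM(getEmbeddingEngineSelection());
console.log(llm.promptWindowLimit()); // 4096
llm
  .getChatCompletion([{ role: "user", content: "Hello!" }], { temperature: 0.7 })
  .then(console.log);
```
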
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 9df2e8f123c62ff29aefcf387c37cf486a9e6e5d..cf48937a373f6f6fb3183b07e95aaa0936d5a7e2 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -23,6 +23,7 @@ function getVectorDbClass() {
 
 function getLLMProvider() {
   const vectorSelection = process.env.LLM_PROVIDER || "openai";
+  let embedder = null;
   switch (vectorSelection) {
     case "openai":
       const { OpenAiLLM } = require("../AiProviders/openAi");
@@ -32,8 +33,12 @@ function getLLMProvider() {
       return new AzureOpenAiLLM();
     case "anthropic":
       const { AnthropicLLM } = require("../AiProviders/anthropic");
-      const embedder = getEmbeddingEngineSelection();
+      embedder = getEmbeddingEngineSelection();
       return new AnthropicLLM(embedder);
+    case "lmstudio":
+      const { LMStudioLLM } = require("../AiProviders/lmStudio");
+      embedder = getEmbeddingEngineSelection();
+      return new LMStudioLLM(embedder);
     default:
       throw new Error("ENV: No LLM_PROVIDER value found in environment!");
   }
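
Provider selection stays entirely env-driven; like the `anthropic` case, `lmstudio` is handed the embedding engine because it has no native embedding support. A sketch of the call site:

```js
// Throws if LMSTUDIO_BASE_PATH is unset or no embedding engine is configured.
process.env.LLM_PROVIDER = "lmstudio";
const LLMConnector = getLLMProvider(); // an LMStudioLLM instance
```
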
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 976849d923db2df07e09c15243856c51270a356b..e97f97917f9338a9981219d274f7de16b4d02601 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -44,6 +44,16 @@ const KEY_MAPPING = {
     checks: [isNotEmpty, validAnthropicModel],
   },
 
+  // LMStudio Settings
+  LMStudioBasePath: {
+    envKey: "LMSTUDIO_BASE_PATH",
+    checks: [isNotEmpty, validLMStudioBasePath],
+  },
+  LMStudioTokenLimit: {
+    envKey: "LMSTUDIO_MODEL_TOKEN_LIMIT",
+    checks: [nonZero],
+  },
+
   EmbeddingEngine: {
     envKey: "EMBEDDING_ENGINE",
     checks: [supportedEmbeddingModel],
@@ -117,6 +127,11 @@ function isNotEmpty(input = "") {
   return !input || input.length === 0 ? "Value cannot be empty" : null;
 }
 
+function nonZero(input = "") {
+  if (isNaN(Number(input))) return "Value must be a number";
+  return Number(input) <= 0 ? "Value must be greater than zero" : null;
+}
+
 function isValidURL(input = "") {
   try {
     new URL(input);
@@ -136,8 +151,20 @@ function validAnthropicApiKey(input = "") {
     : "Anthropic Key must start with sk-ant-";
 }
 
+function validLMStudioBasePath(input = "") {
+  try {
+    new URL(input);
+    if (!input.includes("/v1")) return "URL must include /v1";
+    if (input.endsWith("/"))
+      return "URL cannot end with a slash";
+    return null;
+  } catch {
+    return "Not a valid URL";
+  }
+}
+
 function supportedLLM(input = "") {
-  return ["openai", "azure", "anthropic"].includes(input);
+  return ["openai", "azure", "anthropic", "lmstudio"].includes(input);
 }
 
 function validAnthropicModel(input = "") {
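
For reference, a few spot checks of the new validators as written above (expected return values in comments):

```js
validLMStudioBasePath("http://localhost:1234/v1");  // null (valid)
validLMStudioBasePath("http://localhost:1234/v1/"); // "URL cannot end with a slash"
validLMStudioBasePath("http://localhost:1234");     // "URL must include /v1"
validLMStudioBasePath("not a url");                 // "Not a valid URL"

nonZero("4096"); // null (valid)
nonZero("0");    // "Value must be greater than zero"
nonZero("abc");  // "Value must be a number"
```
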