diff --git a/README.md b/README.md
index d42f6fe913f0eed2ef08c255221c0a4d76cf88b2..68c21e4b5a26bf648c744946524f2bb76b4f60b2 100644
--- a/README.md
+++ b/README.md
@@ -87,6 +87,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
 - [Fireworks AI  (chat models)](https://fireworks.ai/)
 - [Perplexity (chat models)](https://www.perplexity.ai/)
 - [OpenRouter (chat models)](https://openrouter.ai/)
+- [DeepSeek (chat models)](https://deepseek.com/)
 - [Mistral](https://mistral.ai/)
 - [Groq](https://groq.com/)
 - [Cohere](https://cohere.com/)
diff --git a/frontend/src/components/LLMSelection/DeepSeekOptions/index.jsx b/frontend/src/components/LLMSelection/DeepSeekOptions/index.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..5c83d65a94c1defacfa914ef506a62c76aad67fe
--- /dev/null
+++ b/frontend/src/components/LLMSelection/DeepSeekOptions/index.jsx
@@ -0,0 +1,104 @@
+import { useState, useEffect } from "react";
+import System from "@/models/system";
+
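+// Settings panel for the DeepSeek provider: captures the API key and, unless
+// the form is credentials-only, renders the model picker alongside it.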
+export default function DeepSeekOptions({ settings }) {
+  const [inputValue, setInputValue] = useState(settings?.DeepSeekApiKey);
+  const [deepSeekApiKey, setDeepSeekApiKey] = useState(
+    settings?.DeepSeekApiKey
+  );
+
+  return (
+    <div className="flex gap-[36px] mt-1.5">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          API Key
+        </label>
+        <input
+          type="password"
+          name="DeepSeekApiKey"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+          placeholder="DeepSeek API Key"
+          defaultValue={settings?.DeepSeekApiKey ? "*".repeat(20) : ""}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+          onChange={(e) => setInputValue(e.target.value)}
+          onBlur={() => setDeepSeekApiKey(inputValue)}
+        />
+      </div>
+      {!settings?.credentialsOnly && (
+        <DeepSeekModelSelection settings={settings} apiKey={deepSeekApiKey} />
+      )}
+    </div>
+  );
+}
+
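+// Loads the models available to the given key via the backend (System.customModels)
+// and renders them in a dropdown, showing a disabled placeholder while loading.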
+function DeepSeekModelSelection({ apiKey, settings }) {
+  const [models, setModels] = useState([]);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    async function findCustomModels() {
+      if (!apiKey) {
+        setModels([]);
+        setLoading(true);
+        return;
+      }
+
+      setLoading(true);
+      const { models } = await System.customModels(
+        "deepseek",
+        typeof apiKey === "boolean" ? null : apiKey
+      );
+      setModels(models || []);
+      setLoading(false);
+    }
+    findCustomModels();
+  }, [apiKey]);
+
+  if (loading) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          Chat Model Selection
+        </label>
+        <select
+          name="DeepSeekModelPref"
+          disabled={true}
+          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            -- loading available models --
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-3">
+        Chat Model Selection
+      </label>
+      <select
+        name="DeepSeekModelPref"
+        required={true}
+        className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {models.map((model) => (
+          <option
+            key={model.id}
+            value={model.id}
+            selected={settings?.DeepSeekModelPref === model.id}
+          >
+            {model.name}
+          </option>
+        ))}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/media/llmprovider/deepseek.png b/frontend/src/media/llmprovider/deepseek.png
new file mode 100644
index 0000000000000000000000000000000000000000..bb8b9f2734e58208c65e477617c7d23ac76c7ada
Binary files /dev/null and b/frontend/src/media/llmprovider/deepseek.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index a70b55cc8762cb5587cb5094a515899ecafe273d..6b041c1175185fbc02b7d5fa7941f5f72a831cd2 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -25,6 +25,7 @@ import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
+import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
 
 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -48,6 +49,7 @@ import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
 import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
 import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
+import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
 
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -211,6 +213,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
     description: "Run LiteLLM's OpenAI compatible proxy for various LLMs.",
     requiredConfig: ["LiteLLMBasePath"],
   },
+  {
+    name: "DeepSeek",
+    value: "deepseek",
+    logo: DeepSeekLogo,
+    options: (settings) => <DeepSeekOptions settings={settings} />,
+    description: "Run DeepSeek's powerful LLMs.",
+    requiredConfig: ["DeepSeekApiKey"],
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index b739d502a822570cda86e4dff1eb3e6ac3d9da24..39d10e77f751e1d6b8aeec997a032b7119c316a2 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -20,6 +20,7 @@ import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
+import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
 
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import ZillizLogo from "@/media/vectordbs/zilliz.png";
@@ -196,6 +197,11 @@ export const LLM_SELECTION_PRIVACY = {
     ],
     logo: AWSBedrockLogo,
   },
+  deepseek: {
+    name: "DeepSeek",
+    description: ["Your model and chat contents are visible to DeepSeek"],
+    logo: DeepSeekLogo,
+  },
 };
 
 export const VECTOR_DB_PRIVACY = {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 52996b695cc3979f4a56f765f3adc4bac177c3ae..81b26f66aca3b30c6304c4eb06ac6ae5b30cf0e0 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -20,6 +20,7 @@ import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
+import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
 
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -43,6 +44,7 @@ import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
 import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
 import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
+import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
 
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import System from "@/models/system";
@@ -186,6 +188,13 @@ const LLMS = [
     options: (settings) => <LiteLLMOptions settings={settings} />,
     description: "Run LiteLLM's OpenAI compatible proxy for various LLMs.",
   },
+  {
+    name: "DeepSeek",
+    value: "deepseek",
+    logo: DeepSeekLogo,
+    options: (settings) => <DeepSeekOptions settings={settings} />,
+    description: "Run DeepSeek's powerful LLMs.",
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",
diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
index 00a0aef95fc3ffcfde8cc910e4bb9a470cf5825f..97193d5a0b15a635693ac614d92d3484245b5949 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
@@ -23,6 +23,7 @@ const ENABLED_PROVIDERS = [
   "generic-openai",
   "bedrock",
   "fireworksai",
+  "deepseek",
   // TODO: More agent support.
   // "cohere",         // Has tool calling and will need to build explicit support
   // "huggingface"     // Can be done but already has issues with no-chat templated. Needs to be tested.
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index e3012ec4ecaa06b00377267ecef50136ecb2b719..c510001917def88def2326b3b350f297d5f4f367 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -506,6 +506,10 @@ const SystemSettings = {
 
       // VoyageAi API Keys
       VoyageAiApiKey: !!process.env.VOYAGEAI_API_KEY,
+
+      // DeepSeek API Keys
+      DeepSeekApiKey: !!process.env.DEEPSEEK_API_KEY,
+      DeepSeekModelPref: process.env.DEEPSEEK_MODEL_PREF,
     };
   },
 
diff --git a/server/utils/AiProviders/deepseek/index.js b/server/utils/AiProviders/deepseek/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..5ef4c9a1c02cb2b2251138b13e118f96c271d214
--- /dev/null
+++ b/server/utils/AiProviders/deepseek/index.js
@@ -0,0 +1,133 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+  handleDefaultStreamResponseV2,
+} = require("../../helpers/chat/responses");
+const { MODEL_MAP } = require("../modelMap");
+
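+// Chat provider backed by DeepSeek's OpenAI-compatible API. A minimal usage
+// sketch, assuming DEEPSEEK_API_KEY is set in the environment:
+//   const llm = new DeepSeekLLM(null, "deepseek-chat");
+//   const reply = await llm.getChatCompletion(messages, { temperature: 0.7 });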
+class DeepSeekLLM {
+  constructor(embedder = null, modelPreference = null) {
+    if (!process.env.DEEPSEEK_API_KEY)
+      throw new Error("No DeepSeek API key was set.");
+    const { OpenAI: OpenAIApi } = require("openai");
+
+    this.openai = new OpenAIApi({
+      apiKey: process.env.DEEPSEEK_API_KEY,
+      baseURL: "https://api.deepseek.com/v1",
+    });
+    this.model =
+      modelPreference || process.env.DEEPSEEK_MODEL_PREF || "deepseek-chat";
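+    // Budget the context window: ~15% history, ~15% system prompt, ~70% user prompt.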
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    this.embedder = embedder ?? new NativeEmbedder();
+    this.defaultTemp = 0.7;
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
+  streamingEnabled() {
+    return "streamGetChatCompletion" in this;
+  }
+
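+  // Context window for the given model, falling back to 8,192 tokens when unknown.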
+  static promptWindowLimit(modelName) {
+    return MODEL_MAP.deepseek[modelName] ?? 8192;
+  }
+
+  promptWindowLimit() {
+    return MODEL_MAP.deepseek[this.model] ?? 8192;
+  }
+
+  async isValidChatCompletionModel(modelName = "") {
+    const models = await this.openai.models.list().catch(() => ({ data: [] }));
+    return models.data.some((model) => model.id === modelName);
+  }
+
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 } = {}) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `DeepSeek chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const result = await this.openai.chat.completions
+      .create({
+        model: this.model,
+        messages,
+        temperature,
+      })
+      .catch((e) => {
+        throw new Error(e.message);
+      });
+
+    if (!result.hasOwnProperty("choices") || result.choices.length === 0)
+      return null;
+    return result.choices[0].message.content;
+  }
+
+  async streamGetChatCompletion(messages = null, { temperature = 0.7 } = {}) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `DeepSeek chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const streamRequest = await this.openai.chat.completions.create({
+      model: this.model,
+      stream: true,
+      messages,
+      temperature,
+    });
+    return streamRequest;
+  }
+
+  handleStream(response, stream, responseProps) {
+    return handleDefaultStreamResponseV2(response, stream, responseProps);
+  }
+
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+module.exports = {
+  DeepSeekLLM,
+};
diff --git a/server/utils/AiProviders/modelMap.js b/server/utils/AiProviders/modelMap.js
index b7604b69a2235545ffed3ac7273b9b5a19732d7a..99d78dc14259f70996d472cdedb6e57dd0f288d3 100644
--- a/server/utils/AiProviders/modelMap.js
+++ b/server/utils/AiProviders/modelMap.js
@@ -53,6 +53,10 @@ const MODEL_MAP = {
     "gpt-4": 8_192,
     "gpt-4-32k": 32_000,
   },
+  deepseek: {
+    "deepseek-chat": 128_000,
+    "deepseek-coder": 128_000,
+  },
 };
 
 module.exports = { MODEL_MAP };
diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js
index 90d6069c0e0fe1c75f7ffed1de5824ad13028ece..1d356f00ad086e3c2d050892175d1d43880897d4 100644
--- a/server/utils/agents/aibitat/index.js
+++ b/server/utils/agents/aibitat/index.js
@@ -783,6 +783,8 @@ ${this.getHistory({ to: route.to })
         return new Providers.AWSBedrockProvider({});
       case "fireworksai":
         return new Providers.FireworksAIProvider({ model: config.model });
+      case "deepseek":
+        return new Providers.DeepSeekProvider({ model: config.model });
 
       default:
         throw new Error(
diff --git a/server/utils/agents/aibitat/providers/ai-provider.js b/server/utils/agents/aibitat/providers/ai-provider.js
index 23d107647f1cf119bd6753445e7e0565491d3df0..3a144ec6ccf718d2a1664f57d7c048229a522e52 100644
--- a/server/utils/agents/aibitat/providers/ai-provider.js
+++ b/server/utils/agents/aibitat/providers/ai-provider.js
@@ -174,6 +174,15 @@ class Provider {
           apiKey: process.env.TEXT_GEN_WEB_UI_API_KEY ?? "not-used",
           ...config,
         });
+      case "deepseek":
+        return new ChatOpenAI({
+          configuration: {
+            baseURL: "https://api.deepseek.com/v1",
+          },
+          apiKey: process.env.DEEPSEEK_API_KEY ?? null,
+          ...config,
+        });
       default:
         throw new Error(`Unsupported provider ${provider} for this task.`);
     }
diff --git a/server/utils/agents/aibitat/providers/deepseek.js b/server/utils/agents/aibitat/providers/deepseek.js
new file mode 100644
index 0000000000000000000000000000000000000000..aec1ee39e12b3700a10a55e2fa880d2c66944653
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/deepseek.js
@@ -0,0 +1,121 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+const { toValidNumber } = require("../../../http/index.js");
+
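+// Agent provider for DeepSeek's OpenAI-compatible API. Tool use is emulated
+// through the UnTooled mixin rather than native function calling.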
+class DeepSeekProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(config = {}) {
+    super();
+    const { model = "deepseek-chat" } = config;
+    const client = new OpenAI({
+      baseURL: "https://api.deepseek.com/v1",
+      apiKey: process.env.DEEPSEEK_API_KEY ?? null,
+      maxRetries: 3,
+    });
+
+    this._client = client;
+    this.model = model;
+    this.verbose = true;
+    this.maxTokens = process.env.DEEPSEEK_MAX_TOKENS
+      ? toValidNumber(process.env.DEEPSEEK_MAX_TOKENS, 1024)
+      : 1024;
+  }
+
+  get client() {
+    return this._client;
+  }
+
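+  // Deterministic (temperature 0) completion UnTooled uses to decide on a tool call.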
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        temperature: 0,
+        messages,
+        max_tokens: this.maxTokens,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("DeepSeek chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("DeepSeek chat: No results length!");
+        return result.choices[0].message.content;
+      })
+      .catch((_) => {
+        return null;
+      });
+  }
+
+  /**
+   * Create a completion based on the received messages.
+   *
+   * @param messages A list of messages to send to the API.
+   * @param functions An optional array of tool definitions the model may call.
+   * @returns The completion.
+   */
+  async complete(messages, functions = null) {
+    try {
+      let completion;
+      if (functions?.length > 0) {
+        const { toolCall, text } = await this.functionCall(
+          messages,
+          functions,
+          this.#handleFunctionCallChat.bind(this)
+        );
+
+        if (toolCall !== null) {
+          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+          return {
+            result: null,
+            functionCall: {
+              name: toolCall.name,
+              arguments: toolCall.arguments,
+            },
+            cost: 0,
+          };
+        }
+        completion = { content: text };
+      }
+
+      if (!completion?.content) {
+        this.providerLog(
+          "Will assume chat completion without tool call inputs."
+        );
+        const response = await this.client.chat.completions.create({
+          model: this.model,
+          messages: this.cleanMsgs(messages),
+        });
+        completion = response.choices[0].message;
+      }
+
+      // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
+      // from calling the exact same function over and over in a loop within a single chat exchange
+      // _but_ we should enable it to call previously used tools in a new chat interaction.
+      this.deduplicator.reset("runs");
+      return {
+        result: completion.content,
+        cost: 0,
+      };
+    } catch (error) {
+      throw error;
+    }
+  }
+
+  /**
+   * Get the cost of the completion.
+   *
+   * @param _usage The completion to get the cost for.
+   * @returns The cost of the completion.
+   */
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = DeepSeekProvider;
diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index dd95bb54a79fbe748a999ac3d0a4f8fc49177dbc..086e0ccf0f04c3a9c8fde389267e83dabc18663d 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -14,6 +14,7 @@ const PerplexityProvider = require("./perplexity.js");
 const TextWebGenUiProvider = require("./textgenwebui.js");
 const AWSBedrockProvider = require("./bedrock.js");
 const FireworksAIProvider = require("./fireworksai.js");
+const DeepSeekProvider = require("./deepseek.js");
 
 module.exports = {
   OpenAIProvider,
@@ -28,6 +29,7 @@ module.exports = {
   OpenRouterProvider,
   MistralProvider,
   GenericOpenAiProvider,
+  DeepSeekProvider,
   PerplexityProvider,
   TextWebGenUiProvider,
   AWSBedrockProvider,
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index 389d9d71b098ab73421d1e1deeb2b31300070179..3936f93884f58846063d538b694ad350c1d3873a 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -162,6 +162,10 @@ class AgentHandler {
             "FireworksAI API Key must be provided to use agents."
           );
         break;
+      case "deepseek":
+        if (!process.env.DEEPSEEK_API_KEY)
+          throw new Error("DeepSeek API Key must be provided to use agents.");
+        break;
 
       default:
         throw new Error(
@@ -206,6 +210,8 @@ class AgentHandler {
         return null;
       case "fireworksai":
         return null;
+      case "deepseek":
+        return "deepseek-chat";
       default:
         return "unknown";
     }
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index a25896ef4fcebd4ea919e6b3d4238a47fb206642..f061d35ff1f6432f20698fee8a7ddfdefaf9f7cb 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -18,6 +18,7 @@ const SUPPORT_CUSTOM_MODELS = [
   "litellm",
   "elevenlabs-tts",
   "groq",
+  "deepseek",
 ];
 
 async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -53,6 +54,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
       return await getElevenLabsModels(apiKey);
     case "groq":
       return await getGroqAiModels(apiKey);
+    case "deepseek":
+      return await getDeepSeekModels(apiKey);
     default:
       return { models: [], error: "Invalid provider for custom models" };
   }
@@ -419,6 +422,32 @@
   return { models, error: null };
 }
 
+async function getDeepSeekModels(apiKey = null) {
+  const { OpenAI: OpenAIApi } = require("openai");
+  const openai = new OpenAIApi({
+    apiKey: apiKey || process.env.DEEPSEEK_API_KEY,
+    baseURL: "https://api.deepseek.com/v1",
+  });
+  const models = await openai.models
+    .list()
+    .then((results) => results.data)
+    .then((models) =>
+      models.map((model) => ({
+        id: model.id,
+        name: model.id,
+        organization: model.owned_by,
+      }))
+    )
+    .catch((e) => {
+      console.error(`DeepSeek:listModels`, e.message);
+      return [];
+    });
+
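+  // The key returned models, so persist it for later requests in this session.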
+  if (models.length > 0 && !!apiKey) process.env.DEEPSEEK_API_KEY = apiKey;
+  return { models, error: null };
+}
+
 module.exports = {
   getCustomModels,
 };
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 71e352c303d9a9807d89abad62f285e4917f1832..6f2dd79d40c723d3df2d4992ae7fdeb6d4136ef2 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -159,6 +159,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
     case "bedrock":
       const { AWSBedrockLLM } = require("../AiProviders/bedrock");
       return new AWSBedrockLLM(embedder, model);
+    case "deepseek":
+      const { DeepSeekLLM } = require("../AiProviders/deepseek");
+      return new DeepSeekLLM(embedder, model);
     default:
       throw new Error(
         `ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 7b70efa236467cfc4940f580718fb049466dcf64..e898d4b098db5c8cb0510a372a1787daa512ef42 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -501,6 +501,16 @@ const KEY_MAPPING = {
     envKey: "TTS_PIPER_VOICE_MODEL",
     checks: [],
   },
+
+  // DeepSeek Options
+  DeepSeekApiKey: {
+    envKey: "DEEPSEEK_API_KEY",
+    checks: [isNotEmpty],
+  },
+  DeepSeekModelPref: {
+    envKey: "DEEPSEEK_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
 };
 
 function isNotEmpty(input = "") {
@@ -602,6 +612,7 @@ function supportedLLM(input = "") {
     "litellm",
     "generic-openai",
     "bedrock",
+    "deepseek",
   ].includes(input);
   return validSelection ? null : `${input} is not a valid LLM provider.`;
 }