diff --git a/docker/.env.example b/docker/.env.example
index 20120b5b54c4ef4dfc40c8aba2cb155f9548f01a..e10ace026d12938381888a53aba19536579b2533 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -66,6 +66,11 @@ GID='1000'
 # GROQ_API_KEY=gsk_abcxyz
 # GROQ_MODEL_PREF=llama3-8b-8192
 
+# LLM_PROVIDER='koboldcpp'
+# KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1'
+# KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S'
+# KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096
+
 # LLM_PROVIDER='generic-openai'
 # GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
 # GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
diff --git a/frontend/src/components/LLMSelection/KoboldCPPOptions/index.jsx b/frontend/src/components/LLMSelection/KoboldCPPOptions/index.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..7e5e20aef3a7f1ae55fcecdd96014a4121bbb023
--- /dev/null
+++ b/frontend/src/components/LLMSelection/KoboldCPPOptions/index.jsx
@@ -0,0 +1,114 @@
+import { useState, useEffect } from "react";
+import System from "@/models/system";
+
+export default function KoboldCPPOptions({ settings }) {
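+  // Track the URL as the user types; only commit it on blur so the model list
+  // isn't re-fetched on every keystroke.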
+  const [basePathValue, setBasePathValue] = useState(
+    settings?.KoboldCPPBasePath
+  );
+  const [basePath, setBasePath] = useState(settings?.KoboldCPPBasePath);
+
+  return (
+    <div className="flex gap-4 flex-wrap">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Base URL
+        </label>
+        <input
+          type="url"
+          name="KoboldCPPBasePath"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="http://127.0.0.1:5000/v1"
+          defaultValue={settings?.KoboldCPPBasePath}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+          onChange={(e) => setBasePathValue(e.target.value)}
+          onBlur={() => setBasePath(basePathValue)}
+        />
+      </div>
+      <KoboldCPPModelSelection settings={settings} basePath={basePath} />
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Token context window
+        </label>
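+        {/* Blur on scroll so the mouse wheel can't accidentally change the value. */}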
+        <input
+          type="number"
+          name="KoboldCPPTokenLimit"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="4096"
+          min={1}
+          onScroll={(e) => e.target.blur()}
+          defaultValue={settings?.KoboldCPPTokenLimit}
+          required={true}
+          autoComplete="off"
+        />
+      </div>
+    </div>
+  );
+}
+
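+// Loads the model list from the KoboldCPP server's OpenAI-compatible /v1/models
+// endpoint once a valid base URL has been committed.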
+function KoboldCPPModelSelection({ settings, basePath = null }) {
+  const [customModels, setCustomModels] = useState([]);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    async function findCustomModels() {
+      if (!basePath || !basePath.includes("/v1")) {
+        setCustomModels([]);
+        setLoading(false);
+        return;
+      }
+      setLoading(true);
+      const { models } = await System.customModels("koboldcpp", null, basePath);
+      setCustomModels(models || []);
+      setLoading(false);
+    }
+    findCustomModels();
+  }, [basePath]);
+
+  if (loading || customModels.length === 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Chat Model Selection
+        </label>
+        <select
+          name="KoboldCPPModelPref"
+          disabled={true}
+          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true}>
+            {!basePath?.includes("/v1")
+              ? "-- waiting for URL --"
+              : loading
+                ? "-- loading available models --"
+                : "-- no models found --"}
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-4">
+        Chat Model Selection
+      </label>
+      <select
+        name="KoboldCPPModelPref"
+        required={true}
+        defaultValue={settings?.KoboldCPPModelPref}
+        className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {customModels.map((model) => (
+          <option key={model.id} value={model.id}>
+            {model.id}
+          </option>
+        ))}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/media/llmprovider/koboldcpp.png b/frontend/src/media/llmprovider/koboldcpp.png
new file mode 100644
index 0000000000000000000000000000000000000000..5724f04ab3a1bc40ac4e56c1fe9ee61fc67ba3ea
Binary files /dev/null and b/frontend/src/media/llmprovider/koboldcpp.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index ce37bd480501f803933ca5331cab89b7bd201c8f..60827e0def06274e04cc3ea8781869df49f3a20a 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -18,6 +18,7 @@ import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
 import PerplexityLogo from "@/media/llmprovider/perplexity.png";
 import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
 import GroqLogo from "@/media/llmprovider/groq.png";
+import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -40,6 +41,7 @@ import CohereAiOptions from "@/components/LLMSelection/CohereAiOptions";
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
 import CTAButton from "@/components/lib/CTAButton";
+import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
 
 export const AVAILABLE_LLM_PROVIDERS = [
   {
@@ -154,6 +156,18 @@ export const AVAILABLE_LLM_PROVIDERS = [
       "The fastest LLM inferencing available for real-time AI applications.",
     requiredConfig: ["GroqApiKey"],
   },
+  {
+    name: "KoboldCPP",
+    value: "koboldcpp",
+    logo: KoboldCPPLogo,
+    options: (settings) => <KoboldCPPOptions settings={settings} />,
+    description: "Run local LLMs using koboldcpp.",
+    requiredConfig: [
+      "KoboldCPPModelPref",
+      "KoboldCPPBasePath",
+      "KoboldCPPTokenLimit",
+    ],
+  },
   {
     name: "Cohere",
     value: "cohere",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index d0613b8c3ea84179743a2eaaa3f14fb5df102068..6e8a18974f04a5aba77ae37c52d204bdff8bf850 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -15,6 +15,7 @@ import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
 import PerplexityLogo from "@/media/llmprovider/perplexity.png";
 import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
 import GroqLogo from "@/media/llmprovider/groq.png";
+import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import ZillizLogo from "@/media/vectordbs/zilliz.png";
 import AstraDBLogo from "@/media/vectordbs/astraDB.png";
@@ -138,6 +139,13 @@ export const LLM_SELECTION_PRIVACY = {
     ],
     logo: GroqLogo,
   },
+  koboldcpp: {
+    name: "KoboldCPP",
+    description: [
+      "Your model and chats are only accessible on the server running KoboldCPP",
+    ],
+    logo: KoboldCPPLogo,
+  },
   "generic-openai": {
     name: "Generic OpenAI compatible service",
     description: [
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 0e73c399fb1b26f3b4bc4e6d7ee4ea54349225de..4cf3c221e9ec8dec0dd9ae66165f1500570d77de 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -15,6 +15,7 @@ import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
 import PerplexityLogo from "@/media/llmprovider/perplexity.png";
 import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
 import GroqLogo from "@/media/llmprovider/groq.png";
+import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@@ -38,6 +39,7 @@ import System from "@/models/system";
 import paths from "@/utils/paths";
 import showToast from "@/utils/toast";
 import { useNavigate } from "react-router-dom";
+import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
 
 const TITLE = "LLM Preference";
 const DESCRIPTION =
@@ -102,6 +104,13 @@ const LLMS = [
     options: (settings) => <LocalAiOptions settings={settings} />,
     description: "Run LLMs locally on your own machine.",
   },
+  {
+    name: "KoboldCPP",
+    value: "koboldcpp",
+    logo: KoboldCPPLogo,
+    options: (settings) => <KoboldCPPOptions settings={settings} />,
+    description: "Run local LLMs using koboldcpp.",
+  },
   {
     name: "Together AI",
     value: "togetherai",
diff --git a/server/.env.example b/server/.env.example
index e515cc88870d62e329a810008e4e632584ac871b..c8f05340a1f4e6e0d29d7f03423d5d6d7707a2fc 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -63,6 +63,11 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
 # GROQ_API_KEY=gsk_abcxyz
 # GROQ_MODEL_PREF=llama3-8b-8192
 
+# LLM_PROVIDER='koboldcpp'
+# KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1'
+# KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S'
+# KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096
+
 # LLM_PROVIDER='generic-openai'
 # GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
 # GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index dfbdb882fd872229410f415ec6e66d8144106d36..f7782d26a4113f752de40cdb9679e93a715ec797 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -359,6 +359,11 @@ const SystemSettings = {
       HuggingFaceLLMAccessToken: !!process.env.HUGGING_FACE_LLM_API_KEY,
       HuggingFaceLLMTokenLimit: process.env.HUGGING_FACE_LLM_TOKEN_LIMIT,
 
+      // KoboldCPP Keys
+      KoboldCPPModelPref: process.env.KOBOLD_CPP_MODEL_PREF,
+      KoboldCPPBasePath: process.env.KOBOLD_CPP_BASE_PATH,
+      KoboldCPPTokenLimit: process.env.KOBOLD_CPP_MODEL_TOKEN_LIMIT,
+
       // Generic OpenAI Keys
       GenericOpenAiBasePath: process.env.GENERIC_OPEN_AI_BASE_PATH,
       GenericOpenAiModelPref: process.env.GENERIC_OPEN_AI_MODEL_PREF,
diff --git a/server/utils/AiProviders/koboldCPP/index.js b/server/utils/AiProviders/koboldCPP/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..4b1ff3f61b027a506b7072b111b91c9430cce0fd
--- /dev/null
+++ b/server/utils/AiProviders/koboldCPP/index.js
@@ -0,0 +1,185 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+  clientAbortedHandler,
+  writeResponseChunk,
+} = require("../../helpers/chat/responses");
+const { v4: uuidv4 } = require("uuid");
+
+class KoboldCPPLLM {
+  constructor(embedder = null, modelPreference = null) {
+    const { OpenAI: OpenAIApi } = require("openai");
+    if (!process.env.KOBOLD_CPP_BASE_PATH)
+      throw new Error(
+        "KoboldCPP must have a valid base path to use for the api."
+      );
+
+    this.basePath = process.env.KOBOLD_CPP_BASE_PATH;
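+    // KoboldCPP exposes an OpenAI-compatible API and does not require an API key.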
+    this.openai = new OpenAIApi({
+      baseURL: this.basePath,
+      apiKey: null,
+    });
+    this.model = modelPreference ?? process.env.KOBOLD_CPP_MODEL_PREF ?? null;
+    if (!this.model) throw new Error("KoboldCPP must have a valid model set.");
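+    // Reserve slices of the context window: 15% chat history, 15% system prompt, 70% user prompt.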
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    if (!embedder)
+      console.warn(
+        "No embedding provider defined for KoboldCPPLLM - falling back to NativeEmbedder for embedding!"
+      );
+    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.defaultTemp = 0.7;
+    this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
+  }
+
+  log(text, ...args) {
+    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+  }
+
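+  // Wraps retrieved context snippets in [CONTEXT i] markers for injection into the system prompt.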
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
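+  // Streaming is supported whenever streamGetChatCompletion is implemented on this class.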
+  streamingEnabled() {
+    return "streamGetChatCompletion" in this;
+  }
+
+  // Ensure the user set a value for the token limit
+  // and if undefined - assume 4096 window.
+  promptWindowLimit() {
+    const limit = process.env.KOBOLD_CPP_MODEL_TOKEN_LIMIT || 4096;
+    if (!limit || isNaN(Number(limit)))
+      throw new Error("No token context limit was set.");
+    return Number(limit);
+  }
+
+  // Short circuit since we have no idea if the model is valid or not
+  // in pre-flight for generic endpoints
+  isValidChatCompletionModel(_modelName = "") {
+    return true;
+  }
+
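+  // Builds the OpenAI-style message array: system prompt (plus any context),
+  // prior chat history, then the new user prompt.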
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+  }
+
+  async isSafe(_input = "") {
+    // Not implemented so must be stubbed
+    return { safe: true, reasons: [] };
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
+    const result = await this.openai.chat.completions
+      .create({
+        model: this.model,
+        messages,
+        temperature,
+      })
+      .catch((e) => {
+        throw new Error(e.response?.data?.error?.message ?? e.message);
+      });
+
+    if (!result.hasOwnProperty("choices") || result.choices.length === 0)
+      return null;
+    return result.choices[0].message.content;
+  }
+
+  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+    const streamRequest = await this.openai.chat.completions.create({
+      model: this.model,
+      stream: true,
+      messages,
+      temperature,
+    });
+    return streamRequest;
+  }
+
+  handleStream(response, stream, responseProps) {
+    const { uuid = uuidv4(), sources = [] } = responseProps;
+
+    // Custom handler for KoboldCPP stream responses
+    return new Promise(async (resolve) => {
+      let fullText = "";
+      const handleAbort = () => clientAbortedHandler(resolve, fullText);
+      response.on("close", handleAbort);
+
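+      // Forward each streamed token to the client as it arrives.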
+      for await (const chunk of stream) {
+        const message = chunk?.choices?.[0];
+        const token = message?.delta?.content;
+
+        if (token) {
+          fullText += token;
+          writeResponseChunk(response, {
+            uuid,
+            sources: [],
+            type: "textResponseChunk",
+            textResponse: token,
+            close: false,
+            error: false,
+          });
+        }
+
+        // KoboldCPP ends the stream with a finish_reason of "length" or "stop"
+        if (
+          message?.finish_reason === "length" ||
+          message?.finish_reason === "stop"
+        ) {
+          writeResponseChunk(response, {
+            uuid,
+            sources,
+            type: "textResponseChunk",
+            textResponse: "",
+            close: true,
+            error: false,
+          });
+          response.removeListener("close", handleAbort);
+          resolve(fullText);
+        }
+      }
+    });
+  }
+
+  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+module.exports = {
+  KoboldCPPLLM,
+};
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index 1bb54170a3a686c8207c4079be7be5b7f71823fa..ce690ae47242a2c36ca6c0f5c046c7c18259496c 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -14,6 +14,7 @@ const SUPPORT_CUSTOM_MODELS = [
   "perplexity",
   "openrouter",
   "lmstudio",
+  "koboldcpp",
 ];
 
 async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -39,6 +40,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
       return await getOpenRouterModels();
     case "lmstudio":
       return await getLMStudioModels(basePath);
+    case "koboldcpp":
+      return await getKoboldCPPModels(basePath);
     default:
       return { models: [], error: "Invalid provider for custom models" };
   }
@@ -171,6 +174,29 @@ async function getLMStudioModels(basePath = null) {
   }
 }
 
+async function getKoboldCPPModels(basePath = null) {
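+  // KoboldCPP serves an OpenAI-compatible API, so the official client can list models.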
+  try {
+    const { OpenAI: OpenAIApi } = require("openai");
+    const openai = new OpenAIApi({
+      baseURL: basePath || process.env.KOBOLD_CPP_BASE_PATH,
+      apiKey: null,
+    });
+    const models = await openai.models
+      .list()
+      .then((results) => results.data)
+      .catch((e) => {
+        console.error(`KoboldCPP:listModels`, e.message);
+        return [];
+      });
+
+    return { models, error: null };
+  } catch (e) {
+    console.error(`KoboldCPP:getKoboldCPPModels`, e.message);
+    return { models: [], error: "Could not fetch KoboldCPP Models" };
+  }
+}
+
 async function ollamaAIModels(basePath = null) {
   let url;
   try {
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 5d88040dc5413478f2f0dd50ec68f4162bc6674d..ba65e3dfb6fcb860980a4996a6771209e0d63c2d 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -77,6 +77,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
     case "groq":
       const { GroqLLM } = require("../AiProviders/groq");
       return new GroqLLM(embedder, model);
+    case "koboldcpp":
+      const { KoboldCPPLLM } = require("../AiProviders/koboldCPP");
+      return new KoboldCPPLLM(embedder, model);
     case "cohere":
       const { CohereLLM } = require("../AiProviders/cohere");
       return new CohereLLM(embedder, model);
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 45f2fd546db2f9aac1ac2f600841229537b22031..19cdfe2b2d7e74227cb773b7083de1a2ccc74620 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -132,6 +132,20 @@ const KEY_MAPPING = {
     checks: [nonZero],
   },
 
+  // KoboldCPP Settings
+  KoboldCPPBasePath: {
+    envKey: "KOBOLD_CPP_BASE_PATH",
+    checks: [isNotEmpty, isValidURL],
+  },
+  KoboldCPPModelPref: {
+    envKey: "KOBOLD_CPP_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
+  KoboldCPPTokenLimit: {
+    envKey: "KOBOLD_CPP_MODEL_TOKEN_LIMIT",
+    checks: [nonZero],
+  },
+
   // Generic OpenAI InferenceSettings
   GenericOpenAiBasePath: {
     envKey: "GENERIC_OPEN_AI_BASE_PATH",
@@ -403,6 +417,7 @@ function supportedLLM(input = "") {
     "perplexity",
     "openrouter",
     "groq",
+    "koboldcpp",
     "cohere",
     "generic-openai",
   ].includes(input);