diff --git a/.vscode/settings.json b/.vscode/settings.json
index d60238c72cbd688d736f91e8bd0ca8f9941f373c..1409c1073920dbf4a1b7e6c1a81adef710cc905b 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -5,6 +5,7 @@
     "AIbitat",
     "allm",
     "anythingllm",
+    "Apipie",
     "Astra",
     "Chartable",
     "cleancss",
@@ -18,6 +19,7 @@
     "elevenlabs",
     "Embeddable",
     "epub",
+    "fireworksai",
     "GROQ",
     "hljs",
     "huggingface",
@@ -40,14 +42,13 @@
     "pagerender",
     "Qdrant",
     "royalblue",
-    "searxng",
     "SearchApi",
+    "searxng",
     "Serper",
     "Serply",
     "streamable",
     "textgenwebui",
     "togetherai",
-    "fireworksai",
     "Unembed",
     "vectordbs",
     "Weaviate",
diff --git a/docker/.env.example b/docker/.env.example
index e67ac5ddd8b1335199c41bc1e5f5825330963d8f..55f3b26278b2fd00762d21a5f6a6fab616b20833 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -105,6 +105,10 @@ GID='1000'
 # FIREWORKS_AI_LLM_API_KEY='my-fireworks-ai-key'
 # FIREWORKS_AI_LLM_MODEL_PREF='accounts/fireworks/models/llama-v3p1-8b-instruct'
 
+# LLM_PROVIDER='apipie'
+# APIPIE_LLM_API_KEY='sk-123abc'
+# APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
diff --git a/frontend/src/components/LLMSelection/ApiPieOptions/index.jsx b/frontend/src/components/LLMSelection/ApiPieOptions/index.jsx
new file mode 100644
index 0000000000000000000000000000000000000000..9bb16ae3d42c145e649d8e2d16c438f8922f90ca
--- /dev/null
+++ b/frontend/src/components/LLMSelection/ApiPieOptions/index.jsx
@@ -0,0 +1,101 @@
+import System from "@/models/system";
+import { useState, useEffect } from "react";
+
+export default function ApiPieLLMOptions({ settings }) {
+  return (
+    <div className="flex flex-col gap-y-4 mt-1.5">
+      <div className="flex gap-[36px]">
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-3">
+            APIpie API Key
+          </label>
+          <input
+            type="password"
+            name="ApipieLLMApiKey"
+            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+            placeholder="APIpie API Key"
+            defaultValue={settings?.ApipieLLMApiKey ? "*".repeat(20) : ""}
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+          />
+        </div>
+        {!settings?.credentialsOnly && (
+          <APIPieModelSelection settings={settings} />
+        )}
+      </div>
+    </div>
+  );
+}
+
+function APIPieModelSelection({ settings }) {
+  const [groupedModels, setGroupedModels] = useState({});
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    async function findCustomModels() {
+      setLoading(true);
+      const { models } = await System.customModels("apipie");
+      if (models?.length > 0) {
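+        // Group models by their provider/organization so they render as <optgroup> sections below.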
+        const modelsByOrganization = models.reduce((acc, model) => {
+          acc[model.organization] = acc[model.organization] || [];
+          acc[model.organization].push(model);
+          return acc;
+        }, {});
+
+        setGroupedModels(modelsByOrganization);
+      }
+
+      setLoading(false);
+    }
+    findCustomModels();
+  }, []);
+
+  if (loading || Object.keys(groupedModels).length === 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          Chat Model Selection
+        </label>
+        <select
+          name="ApipieLLMModelPref"
+          disabled={true}
+          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            -- loading available models --
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-3">
+        Chat Model Selection
+      </label>
+      <select
+        name="ApipieLLMModelPref"
+        required={true}
+        className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {Object.keys(groupedModels)
+          .sort()
+          .map((organization) => (
+            <optgroup key={organization} label={organization}>
+              {groupedModels[organization].map((model) => (
+                <option
+                  key={model.id}
+                  value={model.id}
+                  selected={settings?.ApipieLLMModelPref === model.id}
+                >
+                  {model.name}
+                </option>
+              ))}
+            </optgroup>
+          ))}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/media/llmprovider/apipie.png b/frontend/src/media/llmprovider/apipie.png
new file mode 100644
index 0000000000000000000000000000000000000000..f7faf500283cd6aba54afd8691a87df3ebb2a559
Binary files /dev/null and b/frontend/src/media/llmprovider/apipie.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index 6b041c1175185fbc02b7d5fa7941f5f72a831cd2..d471dc3584c07740d803762db74349725191f19b 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -26,6 +26,7 @@ import CohereLogo from "@/media/llmprovider/cohere.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
 import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
+import APIPieLogo from "@/media/llmprovider/apipie.png";
 
 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -50,6 +51,7 @@ import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
 import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
 import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
+import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
 
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -221,6 +223,27 @@ export const AVAILABLE_LLM_PROVIDERS = [
     description: "Run DeepSeek's powerful LLMs.",
     requiredConfig: ["DeepSeekApiKey"],
   },
+  {
+    name: "AWS Bedrock",
+    value: "bedrock",
+    logo: AWSBedrockLogo,
+    options: (settings) => <AWSBedrockLLMOptions settings={settings} />,
+    description: "Run powerful foundation models privately with AWS Bedrock.",
+    requiredConfig: [
+      "AwsBedrockLLMAccessKeyId",
+      "AwsBedrockLLMAccessKey",
+      "AwsBedrockLLMRegion",
+      "AwsBedrockLLMModel",
+    ],
+  },
+  {
+    name: "APIpie",
+    value: "apipie",
+    logo: APIPieLogo,
+    options: (settings) => <ApiPieLLMOptions settings={settings} />,
+    description: "A unified API of AI services from leading providers",
+    requiredConfig: ["ApipieLLMApiKey", "ApipieLLMModelPref"],
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",
@@ -235,19 +258,6 @@ export const AVAILABLE_LLM_PROVIDERS = [
       "GenericOpenAiKey",
     ],
   },
-  {
-    name: "AWS Bedrock",
-    value: "bedrock",
-    logo: AWSBedrockLogo,
-    options: (settings) => <AWSBedrockLLMOptions settings={settings} />,
-    description: "Run powerful foundation models privately with AWS Bedrock.",
-    requiredConfig: [
-      "AwsBedrockLLMAccessKeyId",
-      "AwsBedrockLLMAccessKey",
-      "AwsBedrockLLMRegion",
-      "AwsBedrockLLMModel",
-    ],
-  },
   {
     name: "Native",
     value: "native",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index 39d10e77f751e1d6b8aeec997a032b7119c316a2..e3b4e2ee87011d4812b1b86bac77f1a5b67433da 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -21,6 +21,7 @@ import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
 import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
+import APIPieLogo from "@/media/llmprovider/apipie.png";
 
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import ZillizLogo from "@/media/vectordbs/zilliz.png";
@@ -202,6 +203,13 @@ export const LLM_SELECTION_PRIVACY = {
     description: ["Your model and chat contents are visible to DeepSeek"],
     logo: DeepSeekLogo,
   },
+  apipie: {
+    name: "APIpie.AI",
+    description: [
+      "Your model and chat contents are visible to APIpie in accordance with their terms of service.",
+    ],
+    logo: APIPieLogo,
+  },
 };
 
 export const VECTOR_DB_PRIVACY = {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 81b26f66aca3b30c6304c4eb06ac6ae5b30cf0e0..1b69369f5f9c3e44dc75fc7d313c8cb98db5ab17 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -21,6 +21,7 @@ import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
 import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
+import APIPieLogo from "@/media/llmprovider/apipie.png";
 
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -45,6 +46,7 @@ import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
 import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
 import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
+import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
 
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import System from "@/models/system";
@@ -195,6 +197,13 @@ const LLMS = [
     options: (settings) => <DeepSeekOptions settings={settings} />,
     description: "Run DeepSeek's powerful LLMs.",
   },
+  {
+    name: "APIpie",
+    value: "apipie",
+    logo: APIPieLogo,
+    options: (settings) => <ApiPieLLMOptions settings={settings} />,
+    description: "A unified API of AI services from leading providers",
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",
diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
index 97193d5a0b15a635693ac614d92d3484245b5949..5fd9c8e3323f4c097146965cb23ca499af835112 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
@@ -24,6 +24,7 @@ const ENABLED_PROVIDERS = [
   "bedrock",
   "fireworksai",
   "deepseek",
+  "apipie",
   // TODO: More agent support.
   // "cohere",         // Has tool calling and will need to build explicit support
   // "huggingface"     // Can be done but already has issues with no-chat templated. Needs to be tested.
diff --git a/server/.env.example b/server/.env.example
index 80009cfe8bd8f40b24fb4f1ae37a51682c9ed8e9..e6a3871d650e902e048916d7698f8f97eed6a6d0 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -95,6 +95,10 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
 # COHERE_API_KEY=
 # COHERE_MODEL_PREF='command-r'
 
+# LLM_PROVIDER='apipie'
+# APIPIE_LLM_API_KEY='sk-123abc'
+# APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index e4c0fa9d9bfb16410ced246a81fedd5cb236b418..8e3a617675d23b336fc5b4e72353df2dda64a732 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -512,6 +512,10 @@ const SystemSettings = {
       // DeepSeek API Keys
       DeepSeekApiKey: !!process.env.DEEPSEEK_API_KEY,
       DeepSeekModelPref: process.env.DEEPSEEK_MODEL_PREF,
+
+      // APIPie LLM API Keys
+      ApipieLLMApiKey: !!process.env.APIPIE_LLM_API_KEY,
+      ApipieLLMModelPref: process.env.APIPIE_LLM_MODEL_PREF,
     };
   },
 
diff --git a/server/storage/models/.gitignore b/server/storage/models/.gitignore
index 6ed579fa3498dbce9defc91fbd2ede78dc43ea59..b78160e7973478e7bfbe0f7f1cf64720032dfb56 100644
--- a/server/storage/models/.gitignore
+++ b/server/storage/models/.gitignore
@@ -1,4 +1,5 @@
 Xenova
 downloaded/*
 !downloaded/.placeholder
-openrouter
\ No newline at end of file
+openrouter
+apipie
\ No newline at end of file
diff --git a/server/utils/AiProviders/apipie/index.js b/server/utils/AiProviders/apipie/index.js
new file mode 100644
index 0000000000000000000000000000000000000000..acfd2b1e6a023e92883eac4f2c21605ffcaadb86
--- /dev/null
+++ b/server/utils/AiProviders/apipie/index.js
@@ -0,0 +1,336 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const { v4: uuidv4 } = require("uuid");
+const {
+  handleDefaultStreamResponseV2,
+  writeResponseChunk,
+  clientAbortedHandler,
+} = require("../../helpers/chat/responses");
+
+const fs = require("fs");
+const path = require("path");
+const { safeJsonParse } = require("../../http");
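+// On-disk cache location for the fetched ApiPie model list: STORAGE_DIR/models/apipie when
+// STORAGE_DIR is set, otherwise server/storage/models/apipie.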
+const cacheFolder = path.resolve(
+  process.env.STORAGE_DIR
+    ? path.resolve(process.env.STORAGE_DIR, "models", "apipie")
+    : path.resolve(__dirname, `../../../storage/models/apipie`)
+);
+
+class ApiPieLLM {
+  constructor(embedder = null, modelPreference = null) {
+    if (!process.env.APIPIE_LLM_API_KEY)
+      throw new Error("No ApiPie LLM API key was set.");
+
+    const { OpenAI: OpenAIApi } = require("openai");
+    this.basePath = "https://apipie.ai/v1";
+    this.openai = new OpenAIApi({
+      baseURL: this.basePath,
+      apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
+    });
+    this.model =
+      modelPreference ||
+      process.env.APIPIE_LLM_MODEL_PREF ||
+      "openrouter/mistral-7b-instruct";
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    this.embedder = embedder ?? new NativeEmbedder();
+    this.defaultTemp = 0.7;
+
+    if (!fs.existsSync(cacheFolder))
+      fs.mkdirSync(cacheFolder, { recursive: true });
+    this.cacheModelPath = path.resolve(cacheFolder, "models.json");
+    this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
+  }
+
+  log(text, ...args) {
+    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+  }
+
+  // Checks whether the .cached_at file holds a timestamp more than one week (in ms) older
+  // than the current date. If it does, we refetch from the API so the model list stays up to date.
+  #cacheIsStale() {
+    const MAX_STALE = 6.048e8; // 1 Week in MS
+    if (!fs.existsSync(this.cacheAtPath)) return true;
+    const now = Number(new Date());
+    const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
+    return now - timestampMs > MAX_STALE;
+  }
+
+  // Fetches the models from the ApiPie API and caches them locally.
+  // ApiPie exposes a large catalog of models, and we need each model's token context window,
+  // which is read in the constructor - so it is only available once this cache exists.
+  // We used to run this as a chore, but since the API exposes the info directly it makes more
+  // sense to fetch on demand. This might slow down the first request, but it keeps the cached
+  // context windows accurate.
+  async #syncModels() {
+    if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
+      return false;
+
+    this.log("Model cache is not present or stale. Fetching from ApiPie API.");
+    await fetchApiPieModels();
+    return true;
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
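+  // Returns the locally cached model map keyed by "provider/model" (built by fetchApiPieModels below).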
+  models() {
+    if (!fs.existsSync(this.cacheModelPath)) return {};
+    return safeJsonParse(
+      fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
+      {}
+    );
+  }
+
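+  // streamGetChatCompletion is intentionally disabled below, so this resolves to false and
+  // chats fall back to non-streaming completions.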
+  streamingEnabled() {
+    return "streamGetChatCompletion" in this;
+  }
+
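+  // Static variant used when no instance exists yet; reads models.json from the cache folder
+  // directly and falls back to a 4096-token window.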
+  static promptWindowLimit(modelName) {
+    const cacheModelPath = path.resolve(cacheFolder, "models.json");
+    const availableModels = fs.existsSync(cacheModelPath)
+      ? safeJsonParse(
+          fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
+          {}
+        )
+      : {};
+    return availableModels[modelName]?.maxLength || 4096;
+  }
+
+  promptWindowLimit() {
+    const availableModels = this.models();
+    return availableModels[this.model]?.maxLength || 4096;
+  }
+
+  async isValidChatCompletionModel(model = "") {
+    await this.#syncModels();
+    const availableModels = this.models();
+    return availableModels.hasOwnProperty(model);
+  }
+
+  /**
+   * Generates the appropriate content array for a message + attachments.
+   * @param {{userPrompt: string, attachments: import("../../helpers").Attachment[]}} props
+   * @returns {string|object[]}
+   */
+  #generateContent({ userPrompt, attachments = [] }) {
+    if (!attachments.length) {
+      return userPrompt;
+    }
+
+    const content = [{ type: "text", text: userPrompt }];
+    for (let attachment of attachments) {
+      content.push({
+        type: "image_url",
+        image_url: {
+          url: attachment.contentString,
+          detail: "auto",
+        },
+      });
+    }
+    return content.flat();
+  }
+
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+    attachments = [],
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    return [
+      prompt,
+      ...chatHistory,
+      {
+        role: "user",
+        content: this.#generateContent({ userPrompt, attachments }),
+      },
+    ];
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `ApiPie chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const result = await this.openai.chat.completions
+      .create({
+        model: this.model,
+        messages,
+        temperature,
+      })
+      .catch((e) => {
+        throw new Error(e.message);
+      });
+
+    if (!result.hasOwnProperty("choices") || result.choices.length === 0)
+      return null;
+    return result.choices[0].message.content;
+  }
+
+  // APIPie says it supports streaming, but it does not work across all models and providers.
+  // Notably, it is not working for OpenRouter models at all.
+  // async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+  //   if (!(await this.isValidChatCompletionModel(this.model)))
+  //     throw new Error(
+  //       `ApiPie chat: ${this.model} is not valid for chat completion!`
+  //     );
+
+  //   const streamRequest = await this.openai.chat.completions.create({
+  //     model: this.model,
+  //     stream: true,
+  //     messages,
+  //     temperature,
+  //   });
+  //   return streamRequest;
+  // }
+
+  handleStream(response, stream, responseProps) {
+    const { uuid = uuidv4(), sources = [] } = responseProps;
+
+    return new Promise(async (resolve) => {
+      let fullText = "";
+
+      // Establish listener to early-abort a streaming response
+      // in case things go sideways or the user does not like the response.
+      // We preserve the generated text but continue as if chat was completed
+      // to preserve previously generated content.
+      const handleAbort = () => clientAbortedHandler(resolve, fullText);
+      response.on("close", handleAbort);
+
+      try {
+        for await (const chunk of stream) {
+          const message = chunk?.choices?.[0];
+          const token = message?.delta?.content;
+
+          if (token) {
+            fullText += token;
+            writeResponseChunk(response, {
+              uuid,
+              sources: [],
+              type: "textResponseChunk",
+              textResponse: token,
+              close: false,
+              error: false,
+            });
+          }
+
+          if (message === undefined || message.finish_reason !== null) {
+            writeResponseChunk(response, {
+              uuid,
+              sources,
+              type: "textResponseChunk",
+              textResponse: "",
+              close: true,
+              error: false,
+            });
+            response.removeListener("close", handleAbort);
+            resolve(fullText);
+          }
+        }
+      } catch (e) {
+        writeResponseChunk(response, {
+          uuid,
+          sources,
+          type: "abort",
+          textResponse: null,
+          close: true,
+          error: e.message,
+        });
+        response.removeListener("close", handleAbort);
+        resolve(fullText);
+      }
+    });
+  }
+
+  // handleStream(response, stream, responseProps) {
+  //   return handleDefaultStreamResponseV2(response, stream, responseProps);
+  // }
+
+  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
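+// Fetches the full model catalog from ApiPie's /v1/models endpoint, writes it (plus a
+// .cached_at timestamp) to the cache folder, and returns the model map ({} on failure).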
+async function fetchApiPieModels(providedApiKey = null) {
+  const apiKey = providedApiKey || process.env.APIPIE_LLM_API_KEY || null;
+  return await fetch(`https://apipie.ai/v1/models`, {
+    method: "GET",
+    headers: {
+      "Content-Type": "application/json",
+      ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),
+    },
+  })
+    .then((res) => res.json())
+    .then(({ data = [] }) => {
+      const models = {};
+      data.forEach((model) => {
+        models[`${model.provider}/${model.model}`] = {
+          id: `${model.provider}/${model.model}`,
+          name: `${model.provider}/${model.model}`,
+          organization: model.provider,
+          maxLength: model.max_tokens,
+        };
+      });
+
+      // Cache all response information
+      if (!fs.existsSync(cacheFolder))
+        fs.mkdirSync(cacheFolder, { recursive: true });
+      fs.writeFileSync(
+        path.resolve(cacheFolder, "models.json"),
+        JSON.stringify(models),
+        {
+          encoding: "utf-8",
+        }
+      );
+      fs.writeFileSync(
+        path.resolve(cacheFolder, ".cached_at"),
+        String(Number(new Date())),
+        {
+          encoding: "utf-8",
+        }
+      );
+
+      return models;
+    })
+    .catch((e) => {
+      console.error(e);
+      return {};
+    });
+}
+
+module.exports = {
+  ApiPieLLM,
+  fetchApiPieModels,
+};
diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js
index 1d356f00ad086e3c2d050892175d1d43880897d4..cabedb7f850cf69e3259f8f3788dec35c446b44e 100644
--- a/server/utils/agents/aibitat/index.js
+++ b/server/utils/agents/aibitat/index.js
@@ -785,6 +785,8 @@ ${this.getHistory({ to: route.to })
         return new Providers.FireworksAIProvider({ model: config.model });
       case "deepseek":
         return new Providers.DeepSeekProvider({ model: config.model });
+      case "apipie":
+        return new Providers.ApiPieProvider({ model: config.model });
 
       default:
         throw new Error(
diff --git a/server/utils/agents/aibitat/providers/ai-provider.js b/server/utils/agents/aibitat/providers/ai-provider.js
index 3a144ec6ccf718d2a1664f57d7c048229a522e52..5e64e8f26fe865a1fce5539345e0491892d05573 100644
--- a/server/utils/agents/aibitat/providers/ai-provider.js
+++ b/server/utils/agents/aibitat/providers/ai-provider.js
@@ -182,6 +182,14 @@ class Provider {
           apiKey: process.env.DEEPSEEK_API_KEY ?? null,
           ...config,
         });
+      case "apipie":
+        return new ChatOpenAI({
+          configuration: {
+            baseURL: "https://apipie.ai/v1",
+          },
+          apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
+          ...config,
+        });
       default:
         throw new Error(`Unsupported provider ${provider} for this task.`);
     }
diff --git a/server/utils/agents/aibitat/providers/apipie.js b/server/utils/agents/aibitat/providers/apipie.js
new file mode 100644
index 0000000000000000000000000000000000000000..4c6a3c8bf21c3fbcbc9e8fed080cf520253d7244
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/apipie.js
@@ -0,0 +1,116 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+/**
+ * The agent provider for the APIpie (apipie.ai) provider.
+ */
+class ApiPieProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(config = {}) {
+    const { model = "openrouter/llama-3.1-8b-instruct" } = config;
+    super();
+    const client = new OpenAI({
+      baseURL: "https://apipie.ai/v1",
+      apiKey: process.env.APIPIE_LLM_API_KEY,
+      maxRetries: 3,
+    });
+
+    this._client = client;
+    this.model = model;
+    this.verbose = true;
+  }
+
+  get client() {
+    return this._client;
+  }
+
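+  // Deterministic (temperature 0) completion passed to UnTooled's functionCall() for
+  // tool-call detection; returns null on any API error.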
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        temperature: 0,
+        messages,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("ApiPie chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("ApiPie chat: No results length!");
+        return result.choices[0].message.content;
+      })
+      .catch((_) => {
+        return null;
+      });
+  }
+
+  /**
+   * Create a completion based on the received messages.
+   *
+   * @param messages A list of messages to send to the API.
+   * @param functions A list of tool/function definitions the model may call, if any.
+   * @returns The completion.
+   */
+  async complete(messages, functions = []) {
+    try {
+      let completion;
+      if (functions.length > 0) {
+        const { toolCall, text } = await this.functionCall(
+          messages,
+          functions,
+          this.#handleFunctionCallChat.bind(this)
+        );
+
+        if (toolCall !== null) {
+          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+          return {
+            result: null,
+            functionCall: {
+              name: toolCall.name,
+              arguments: toolCall.arguments,
+            },
+            cost: 0,
+          };
+        }
+        completion = { content: text };
+      }
+
+      if (!completion?.content) {
+        this.providerLog(
+          "Will assume chat completion without tool call inputs."
+        );
+        const response = await this.client.chat.completions.create({
+          model: this.model,
+          messages: this.cleanMsgs(messages),
+        });
+        completion = response.choices[0].message;
+      }
+
+      // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
+      // from calling the exact same function over and over in a loop within a single chat exchange
+      // _but_ we should enable it to call previously used tools in a new chat interaction.
+      this.deduplicator.reset("runs");
+      return {
+        result: completion.content,
+        cost: 0,
+      };
+    } catch (error) {
+      throw error;
+    }
+  }
+
+  /**
+   * Get the cost of the completion.
+   *
+   * @param _usage The completion to get the cost for.
+   * @returns The cost of the completion.
+   */
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = ApiPieProvider;
diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index 086e0ccf0f04c3a9c8fde389267e83dabc18663d..507bf181b0e0a0a39c5937f98e0d97505f90c925 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -15,6 +15,7 @@ const TextWebGenUiProvider = require("./textgenwebui.js");
 const AWSBedrockProvider = require("./bedrock.js");
 const FireworksAIProvider = require("./fireworksai.js");
 const DeepSeekProvider = require("./deepseek.js");
+const ApiPieProvider = require("./apipie.js");
 
 module.exports = {
   OpenAIProvider,
@@ -34,4 +35,5 @@ module.exports = {
   TextWebGenUiProvider,
   AWSBedrockProvider,
   FireworksAIProvider,
+  ApiPieProvider,
 };
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index 3936f93884f58846063d538b694ad350c1d3873a..ffa65c753fc09da38a392fd6da02779ee1784d9f 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -166,6 +166,10 @@ class AgentHandler {
         if (!process.env.DEEPSEEK_API_KEY)
           throw new Error("DeepSeek API Key must be provided to use agents.");
         break;
+      case "apipie":
+        if (!process.env.APIPIE_LLM_API_KEY)
+          throw new Error("ApiPie API Key must be provided to use agents.");
+        break;
 
       default:
         throw new Error(
@@ -212,6 +216,8 @@ class AgentHandler {
         return null;
       case "deepseek":
         return "deepseek-chat";
+      case "apipie":
+        return null;
       default:
         return "unknown";
     }
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index f061d35ff1f6432f20698fee8a7ddfdefaf9f7cb..f3430cecc05336088ec513395c0cd48a71d48a2d 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -1,4 +1,5 @@
 const { fetchOpenRouterModels } = require("../AiProviders/openRouter");
+const { fetchApiPieModels } = require("../AiProviders/apipie");
 const { perplexityModels } = require("../AiProviders/perplexity");
 const { togetherAiModels } = require("../AiProviders/togetherAi");
 const { fireworksAiModels } = require("../AiProviders/fireworksAi");
@@ -19,6 +20,7 @@ const SUPPORT_CUSTOM_MODELS = [
   "elevenlabs-tts",
   "groq",
   "deepseek",
+  "apipie",
 ];
 
 async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -56,6 +58,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
       return await getGroqAiModels(apiKey);
     case "deepseek":
       return await getDeepSeekModels(apiKey);
+    case "apipie":
+      return await getAPIPieModels(apiKey);
     default:
       return { models: [], error: "Invalid provider for custom models" };
   }
@@ -355,6 +359,21 @@ async function getOpenRouterModels() {
   return { models, error: null };
 }
 
+async function getAPIPieModels(apiKey = null) {
+  const knownModels = await fetchApiPieModels(apiKey);
+  if (Object.keys(knownModels).length === 0)
+    return { models: [], error: null };
+
+  const models = Object.values(knownModels).map((model) => {
+    return {
+      id: model.id,
+      organization: model.organization,
+      name: model.name,
+    };
+  });
+  return { models, error: null };
+}
+
 async function getMistralModels(apiKey = null) {
   const { OpenAI: OpenAIApi } = require("openai");
   const openai = new OpenAIApi({
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 6f2dd79d40c723d3df2d4992ae7fdeb6d4136ef2..f3f19fb9d9546b53eedbf9dd874e79446f83893b 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -162,6 +162,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
     case "deepseek":
       const { DeepSeekLLM } = require("../AiProviders/deepseek");
       return new DeepSeekLLM(embedder, model);
+    case "apipie":
+      const { ApiPieLLM } = require("../AiProviders/apipie");
+      return new ApiPieLLM(embedder, model);
     default:
       throw new Error(
         `ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -285,6 +288,12 @@ function getLLMProviderClass({ provider = null } = {}) {
     case "bedrock":
       const { AWSBedrockLLM } = require("../AiProviders/bedrock");
       return AWSBedrockLLM;
+    case "deepseek":
+      const { DeepSeekLLM } = require("../AiProviders/deepseek");
+      return DeepSeekLLM;
+    case "apipie":
+      const { ApiPieLLM } = require("../AiProviders/apipie");
+      return ApiPieLLM;
     default:
       return null;
   }
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index db5cfe0e3a15840cab65029ea8e575134d572eb0..160e85d442fa9d4cb1f6e96c1eea9016f7d6618b 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -515,6 +515,16 @@ const KEY_MAPPING = {
     envKey: "DEEPSEEK_MODEL_PREF",
     checks: [isNotEmpty],
   },
+
+  // APIPie Options
+  ApipieLLMApiKey: {
+    envKey: "APIPIE_LLM_API_KEY",
+    checks: [isNotEmpty],
+  },
+  ApipieLLMModelPref: {
+    envKey: "APIPIE_LLM_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
 };
 
 function isNotEmpty(input = "") {
@@ -617,6 +627,7 @@ function supportedLLM(input = "") {
     "generic-openai",
     "bedrock",
     "deepseek",
+    "apipie",
   ].includes(input);
   return validSelection ? null : `${input} is not a valid LLM provider.`;
 }