From bce7988683cc7d71fcea8587a3797796f659ef9b Mon Sep 17 00:00:00 2001
From: Timothy Carambat <rambat1010@gmail.com>
Date: Tue, 15 Oct 2024 12:36:06 -0700
Subject: [PATCH] Integrate Apipie support directly (#2470)

resolves #2464
resolves #989
Note: Streaming not supported
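
Example configuration (mirrors the .env.example additions in this patch):

  LLM_PROVIDER='apipie'
  APIPIE_LLM_API_KEY='sk-123abc'
  APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'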
---
 .vscode/settings.json                         |   5 +-
 docker/.env.example                           |   4 +
 .../LLMSelection/ApiPieOptions/index.jsx      | 101 ++++++
 frontend/src/media/llmprovider/apipie.png     | Bin 0 -> 14798 bytes
 .../GeneralSettings/LLMPreference/index.jsx   |  36 +-
 .../Steps/DataHandling/index.jsx              |   8 +
 .../Steps/LLMPreference/index.jsx             |   9 +
 .../AgentConfig/AgentLLMSelection/index.jsx   |   1 +
 server/.env.example                           |   4 +
 server/models/systemSettings.js               |   4 +
 server/storage/models/.gitignore              |   3 +-
 server/utils/AiProviders/apipie/index.js      | 336 ++++++++++++++++++
 server/utils/agents/aibitat/index.js          |   2 +
 .../agents/aibitat/providers/ai-provider.js   |   8 +
 .../utils/agents/aibitat/providers/apipie.js  | 116 ++++++
 .../utils/agents/aibitat/providers/index.js   |   2 +
 server/utils/agents/index.js                  |   6 +
 server/utils/helpers/customModels.js          |  19 +
 server/utils/helpers/index.js                 |   9 +
 server/utils/helpers/updateENV.js             |  11 +
 20 files changed, 668 insertions(+), 16 deletions(-)
 create mode 100644 frontend/src/components/LLMSelection/ApiPieOptions/index.jsx
 create mode 100644 frontend/src/media/llmprovider/apipie.png
 create mode 100644 server/utils/AiProviders/apipie/index.js
 create mode 100644 server/utils/agents/aibitat/providers/apipie.js

diff --git a/.vscode/settings.json b/.vscode/settings.json
index d60238c72..1409c1073 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -5,6 +5,7 @@
     "AIbitat",
     "allm",
     "anythingllm",
+    "Apipie",
     "Astra",
     "Chartable",
     "cleancss",
@@ -18,6 +19,7 @@
     "elevenlabs",
     "Embeddable",
     "epub",
+    "fireworksai",
     "GROQ",
     "hljs",
     "huggingface",
@@ -40,14 +42,13 @@
     "pagerender",
     "Qdrant",
     "royalblue",
-    "searxng",
     "SearchApi",
+    "searxng",
     "Serper",
     "Serply",
     "streamable",
     "textgenwebui",
     "togetherai",
-    "fireworksai",
     "Unembed",
     "vectordbs",
     "Weaviate",
diff --git a/docker/.env.example b/docker/.env.example
index e67ac5ddd..55f3b2627 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -105,6 +105,10 @@ GID='1000'
 # FIREWORKS_AI_LLM_API_KEY='my-fireworks-ai-key'
 # FIREWORKS_AI_LLM_MODEL_PREF='accounts/fireworks/models/llama-v3p1-8b-instruct'
 
+# LLM_PROVIDER='apipie'
+# APIPIE_LLM_API_KEY='sk-123abc'
+# APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
diff --git a/frontend/src/components/LLMSelection/ApiPieOptions/index.jsx b/frontend/src/components/LLMSelection/ApiPieOptions/index.jsx
new file mode 100644
index 000000000..9bb16ae3d
--- /dev/null
+++ b/frontend/src/components/LLMSelection/ApiPieOptions/index.jsx
@@ -0,0 +1,101 @@
+import System from "@/models/system";
+import { useState, useEffect } from "react";
+
+export default function ApiPieLLMOptions({ settings }) {
+  return (
+    <div className="flex flex-col gap-y-4 mt-1.5">
+      <div className="flex gap-[36px]">
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-3">
+            APIpie API Key
+          </label>
+          <input
+            type="password"
+            name="ApipieLLMApiKey"
+            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+            placeholder="APIpie API Key"
+            defaultValue={settings?.ApipieLLMApiKey ? "*".repeat(20) : ""}
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+          />
+        </div>
+        {!settings?.credentialsOnly && (
+          <APIPieModelSelection settings={settings} />
+        )}
+      </div>
+    </div>
+  );
+}
+
+function APIPieModelSelection({ settings }) {
+  const [groupedModels, setGroupedModels] = useState({});
+  const [loading, setLoading] = useState(true);
+
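+  // Group models by provider organization so the dropdown can render them under <optgroup> headings.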
+  useEffect(() => {
+    async function findCustomModels() {
+      setLoading(true);
+      const { models } = await System.customModels("apipie");
+      if (models?.length > 0) {
+        const modelsByOrganization = models.reduce((acc, model) => {
+          acc[model.organization] = acc[model.organization] || [];
+          acc[model.organization].push(model);
+          return acc;
+        }, {});
+
+        setGroupedModels(modelsByOrganization);
+      }
+
+      setLoading(false);
+    }
+    findCustomModels();
+  }, []);
+
+  if (loading || Object.keys(groupedModels).length === 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          Chat Model Selection
+        </label>
+        <select
+          name="ApipieLLMModelPref"
+          disabled={true}
+          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            -- loading available models --
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-3">
+        Chat Model Selection
+      </label>
+      <select
+        name="ApipieLLMModelPref"
+        required={true}
+        className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {Object.keys(groupedModels)
+          .sort()
+          .map((organization) => (
+            <optgroup key={organization} label={organization}>
+              {groupedModels[organization].map((model) => (
+                <option
+                  key={model.id}
+                  value={model.id}
+                  selected={settings?.ApipieLLMModelPref === model.id}
+                >
+                  {model.name}
+                </option>
+              ))}
+            </optgroup>
+          ))}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/media/llmprovider/apipie.png b/frontend/src/media/llmprovider/apipie.png
new file mode 100644
index 0000000000000000000000000000000000000000..f7faf500283cd6aba54afd8691a87df3ebb2a559
GIT binary patch
literal 14798
zcmeHucQ_o})~`fD5JV6}O+@dbGYHXp^v+<EXv3({5~8<Y5WN%8JJF)YXhEX)8qtD8
zCwF_#_r3R=^WA&@xaGg=c|6AMd+k+!YfYHCsvJJ<LtG3D415K7X$=gF8;aMzcd$T<
zH_L<D;13Q$ULT2pLC|&mdn193fD!}a*1oNlo~xdUvWSI~11HSV$sEq<>3{&EF)*N>
zBH*I~+!aRW>0s}O6!8>e`fG#;_<Y^W#UzeP_a8q{7fUM<4Qbi`7z+LqW3q8|MTl^5
zd3bnmdhl{Oxma`Y2n!2yaYMKu5DqYc1L@`H3iIS}MACzm{&gv7IMTw!7U62^<VbhD
zG|b$|%~gzv>F-hhIFhBs-`gPET<rha#L|KbZVz{WJGvsdcsP0f<&>5dBCfWs_V9l@
zxTL-7zqQ!gUf)E-9_DB*#^lLi3Aci|*}F1{12Q3CuD0g(a5@e;2oIF&`f&EP|1|rr
zmVaH4>wmEAU&H?Cmj9askaTr*u{Hmj2#!cj*k3Jf*OcP~r{#iKUXy|QU&!!}St62_
z$bUNc^-VP4&i~c|2yxwkfVm*yT3!ga7?TDZ>Ez~O0srUM|BnTqxxitra7%FrH-wLa
zhns_kAIkM_U4K6y^glXeJY5wm#Rd3<tt??Y!W;sELJ$r<VIgh~a~^&k4q<aXb3qF}
zehYJM%fHw9$CUrFlB_vch#$fa0gLhQ@dyg@3vvHF=daHHGDXYD&BEsTTH=tuNB;M=
z|33VG^34BAuK&F1zmoO;(nBNuNusV_`7cqueqYc5-k0}Z-WRlqNV>V&IJt;x**efY
zgSo&Q%wew3>%sro@o$&=KW9atEdGTL|30B74Dnw$5DXUii(8yrw49vm#h<|(-C=ah
z8gNHTxC`8c1<LjBBmP(C{ZB3QZ)1Qy`X_GY`dgog&$U@6V_@(FDo9Ied8X~mdAJ&H
zBrlxrH63r>POy1$BcR12_J?uZlYSk_{&7#JJ7e;IGJ0}r-iYa#ku7WjU9wu%H>=;+
zsILhjjyZ2YS25_<X_s*7$9-cCPsg}%^H!+enQ+sb1l>!^<gYiLzB+F{hi3Zc)pi}+
z@y=ZEI`+#v_D$IEqLI4ui1YK$CX7dK-n-pLN!@uKbSwX!l-46jmN)mb<<)T?Nt6En
z=Kn`DdDMn?FUh1Esm?5=n62)Y#A{7M!!Qh+<TP74Ggj5c<>tA=bB>H=4SWl^!xs1s
z62`2Vo9X8ppH=cKgmsJr8sZRZH7Yx~=dwO<Xg0vIubcd=*+Rv_J7a;tmkv|@{XHD`
zeI2a+NOD_xsX(FvUXsGW$ao#tec4bH5o5m86RpsON3{J0^tyRND6tTy_2@$buYf<3
z%_033J8u$boXqa#>sfVuhphN;ct<`j{O+MZ#_&N|Cup>Ud!+<N@C&74H3Vg;EhJM-
zls_0bVk0eY`wRX+>pseDy?c~7=7B0Q3(_|v&$iWp?;3pHFNxop2kxR<mDC&23Du{{
z+5UD#e-Z?C&K>8IE4B4A5y5;FmeU<s#V|TBB#&qKc+j3ea0JHq{mx+YQxyin?WB~0
z=7;`%L=o#HnyZ;nV!fTGH&`1=aPm)S9dwbJKElu8XU6ks3D6Mi!^;a(VlE_lh-j5k
z=xFynQa3MQZbU~wE=7;h(M+3}fqqcw^Tje*+lfqD#66|qBcs-&y%o{M_{b(WM!_5|
zsS#%cXYN#J4j)C)8&o?kuR><}3Ic8r5tbfQwf_#GL{(GZ*(}sG*3Fw%0a0!wttykb
z6PQQT@nv;-Q&AS>wk!`1=2E^0$b5iR)Xb!(!P@nwX`6LdsdVIpdGC>t2}jIfh{do=
z1tf_0yYrfp;`@6oOMDZ(9~(r*+mGb&0Lh+MKDJT#99l&{B9_|LPqT)7DsBbcnz%}h
zQfjZBqIIxNrWmubG-sBXz7zkOeV$cK<EMR_(#Je@qn@<;<%zwXt$b)_(atxTvNr~0
zQK5{)t{>lKm0*@9^q$ft2U1lP_^-5iebO(Kwq_n0m#@~LkGq4_|4e~Ehfx^5@xxKe
zOLe|l*k0w(#|xD^emCBcg4l+~eA<=os|XJIm8c!rY3eqNhVAb2UulO|5Al+S3O~2n
zQ1(B=QC1N*Qe;_XmDZzZ))H45ul?|+0HfHGlWB*YD{<Im0L{IWXv1%17-?B>u`I8S
zH5-GAdEu~rCs6eY(|;~f^AY?_Bl~p0H~SazECz$*7LH-{Y?`k#CX<ViuTNc0**hC4
zwtd3Z)gvvLa|VkO4Co~(%nEm){t3ce$nMw=G5MS{U=IG0?8|4i4m49F)wyUL-k!sJ
zqMme@bn#bz3aa4F2*lgKXMvGqF4A}7hdje8venr1O5bMPeIH<V__Q_CX+xEJJ#qb)
zchz29ni2J0)>Pz+YJyp<a1^d4csuqe^!tM}WeQnHq_vf&^%=VyWJTGKL5Go2J|q|N
zF%8>yem26(s~XZycyO6kg+yM73D9sQ3?FFi-%YoYDt6DtqVJgNGs$4dSlVB{=iU;-
z=78OURIm9I7^%HJReeb{7>#5X9r6{MX{^glYA71!KZ>Ur)-^Z5EgQ55dSt>HWj=@u
zGwmt;nXvu*Y@0n=mZPpf<JTLF3&)pYQC3}hE*FBD0m?YD{tdx2+!_P>gRO$6@t6`D
zslHASqk+X2uIZPLB`Ka>a|vK#lwtDmUhTeTF0@^+syErs<8}<^{op?BN&@k??~F#h
z_3kyR=8<dJE?1l(0S^1ilNkO-L)8}&d(hseeg+*?;b$$WH-DfKR+>)5=b6>mPllOa
z3J|WJ3#%vRxV%oxG00PQ4Z80m&=UJQR#s)cf>Yd@J>s}^Wc7S1qdzv8r}gDBf9uKk
z3e<01h_tXZ<!Iws0L09MIp(Jg>}^T?&kQLt@(Scd`9SRLD8)+>ZN{d@92f2{9DJi$
z4@tK-Z)O-@;tgT;^xtmv{^4+S_`1;3@`M?V(|`D(Q<SBFK)b+`S>h9m-7c{ZnZ^;7
zpUK1Ju#S5&y5)pw@kDDDVKil79^(`ESH(DKr@oD(_7P;g*e7H4nd|46*bMB$xn;Y#
zbp1^NYDuy{q{3V7?C@+P3it_oqlgMai$~b2ib#O8UYNWL)EPV^b11l_DO0iGf5}$X
z$;A|MIu@vQC=t^#5UJfU_C1A*AIOf7dh%k(sbBC8p~-7r+Qf|mS$F{Ly4v$E_FBo@
zQbrV8i}Ya`XP&yYqTI2W$2JEo>=R2no7ETfom@Yyu`?Mg6b`S1@1t(K_29_hi27;d
z>^gFe&6Ex??WYDR>Y-uoTo-Bhu6mq*-2CSrMEt@UdrQ&dRkYFeng=xLYMaDVd|&Bm
ztKGU!j{!&N33Khn(!RYd%E?MCt|=5J4bS<fkWTVaaifLO!3I};C}fJlVX5_NirK>V
zQ{2e4tY)EHKeCu5sl@WM69#|xpZZmaCJyFGkG>qMiIG>hKv5Fil6EeASJBPZ`+BfY
zL3I3~quH@xHPPDcDyw67`OhX){*o7WvMFk0)jJ)pzdYS1y-n!seqym&>ZHcotgr7@
zye_IBL%Bii4EWq+Lhy}~{sGZQq1&a5?owzo&4w_4)O^;rxB=n)cgDWFEfqLvFN~n7
zWL78Wu!)l&eG^+E><rBOYfH{uDeLE_epu_38Q$578H0!U&uNAT)yKCni3OE8V``Ds
z=GqmGo-?oeZjvOHO?=yjMq4wU^wYO3E@jiZQ6|3r==4KjzSgTsXrS{{Gs)80DP~IL
zRa??tssq`mO<$_HJM~H7*w!XK|B3O?vXEd(8cRTR%|yqM5rJ~d&kZ0P@$^j2eBPH;
z<WHFSS6|Wqn|;CTA3ne^`)#IU?I}F2+&V_{=dbBZrkuO(a)yl(wZZE59tN{x7vQQl
z^EnujPAt{-J-XWMDoz_v)o`-Duc-dQ2F-COZ5z6wn45I5mw`b0NchK@Cid1o)V|5i
zYd)Lt-O8y(W$*p3<|$Gk&F(4xHfh|t?iFj*pJa0W?b7+4eOL1abdp6BGf1y|XwQOG
zwEC~5RY|fKanb&IBvao<(o!9#cZ!u_(;eVyHhg?@;e!@+)HWZ5L_ailG4|i4As70b
zq;?UfLCxWY%K93*;gwc*2sI+l8%Ce$$c@)ts}qavNA`oV3AUu`XP#t&y}D-Kh00uK
zr{s3iBwC_q5FfcakJVZRHFJL-wrT`u(0dK9PmSJ@Kp&q^=d9cabdPpgSLI$N0atps
zSBBXp_)W=!dDBDqaK)?oMx*r3Q{>u98Yh*gpF3CeLdNg*OQV$(0yK!rHk{Ez`{@@7
z!P@}~Yw8D7CLB@wojz^uD&K+A@vJ05?MEGH>e?O<C*0_6wcwb8sCXy!ir)s6wf7#*
zG5qG(n}V7?T&nC)ALR3i5s_(4+FE&+P!t=!E}Q+Gk9hXJX?wIy1P$S02KJQ4%<C7c
zAz}sthd;j%WeCrZoyZ|@6!9lB`XAYa1KXidmQCF|T#M%mbVQt06CQ<PrNns8Uf#rr
zx$Grp;m5ek-QB0Pye|)bdLj?*E{OhiU!-jZJj?xq<n-l{&@hSI1?QTeawFW&fP8iB
z;N3$0fMFofde5YbW2hT!{Geg<x7kPTbH^%K+4v0-s)$Si+7-FL5KLdxn_KXxYG_jD
zIN!qXZw_>DbHtzpU{gV4Z?emUzlQ)VZg&BKm{zIm!(15kaH?^Rknw@r&5gjjq>yXn
z8^U|$t3Q+g7NPV|gr?D&brsEFEkxSwS9jRbd_J~y>*9F&mAjFrHBB_GIFxm4&&4nP
za`%>M!Kw!X+3M1M$0Q34wYXyY_*P_oV_a4VtZaj4n*{KL5iiiaF4IGY>9Y#K>D@E1
zOAYl;X!SVJ`*ZdYeCNhP<)RY=eERQpGD~19Yd0mR#e0g0JEu!0Q}A%m4|RUp1NR2e
z0NL?i(Vp^b)=XZ;7QUPgx=S+ACDJI^__0oBTW@z;SjbCE*0wy7aKD<j_Z`9Ffh?FS
zu!}m;hux{iT2RRtrYQW3dFnS`rhICQZqAS5u4mG5@hvG95GN@Uogv`c@gLqcTHA)G
zMWw9{7EPzRHtX>_tA#EA*|ks&OJcJ~=~~^cBq~qpJ=Hk6bE?!(JYqie&~~XajMViV
zPMcEl9O-v^0^FCER10Y$oe>t8t2d9zGy<w*6U0JR+=IuG%rxh_QTyx}978mz#i^l?
z29A-zoiImpKh<ray=X?>GqH~KZCtgOBWHv`+twe+C(L$QzzH)-ZbdcOaHLp_-(5hb
zTh;_y|KPH8Xm}mNpj-K9_Pg4R3_;QGw9j0p!Oco*gGIc}0>a88BX)cje=RUn>)8a+
zKdheH(EMqi!AWqaeuZ-1k;2gJnLb7MFsS$`Z%^)Sd-pxm8*Wk0&8Hg96zet<7b<qU
z-hBl5z23zDi(x<@TLNfy9MKgImcwyaPa{`t@a4#n()o@3<|&F@XJ5Jajb?G-fu#ZR
zGArtPWRE5Fb6GPwDGIY}m4{Tfn>A=Q&Kt!!nB3iE3>!Eq<A#)kD-VlTsDGV%wJcO_
z3$>BcTIQ52Y$YAWW|c^mZ9M;F4=J1H4Y5wUNl1ou0{5ET(^%)oQQnn5LRk0+<B%Gh
zwSDobw|0>#55B~!G)f{HB0xYzmpoV^mhaErFmI)Tx};FgyFGH!W%PQ_{VCd}SYq#j
zvm+Qmf6S=MYqKHT_)7N<jk-H^Rfe|jL}56lMdOq|R@cl}?yt3i$u@h2f>Cc}MoIlZ
zXmiT6>W;akQ3O-?Gb8G)#oRC&sT5zcvM3J_Tv?}`3i^3_k{E}u)DOJZG@Imf=}3Wn
z>5KQ>+qyH8K4)DOJb~ou@)RCU)wFJCwBw+zL@lftk>Ds+t`g`JoSKxya4cB~Nc+5K
z($5m$?dgpZBl~SFl+jybX!V%|Q3DxtX6vL}!!apWMwCDTSq<Lw;A+j%L7!cGG>s%2
z%}kBUB8gDT4z&?DIHMySUTPe@qm~e?PpoPu_@P;^K%*vw2l&vUk2$oUA>wTPc5BWV
zX#D+wE*{kvRT<u`1lB#|>;s78da12mCm=1BIX2vnr4u9{(FI11xo7m^Uf<Z#u5@P<
z87713>yan6>%=#r+*x!kj&-sm<%+_`a@<^yE_!O}9+#$_&2#^-Ded-{;4<cbdbkjy
z?I5FscxYfRZ$6RVJBjW*E6NMbgtrhq3WdY4HNz7^QO9*&JCAtW+Z!pJ+dPawsUzz0
zCNyS`L4^C<nbh2kne;uL>p05+0`_Wx?W`7$BVU@iBccE0$wfi&>1;u<UV{BWQ=W{Q
z5bw}b+`NOD<QWx~;^>`9;n6qG`>h+<KfOM<6+;?KUK=<P+N>uP-2Ty(?T^b!!o{9p
z8O--t@d5)7swwB-2#Qj*7bvhP4yn0u0}*LO)_|7);5*McXwDo~o6?|qc$svHTG@2F
z^~82IzX9e<3r9e*Vp!`eiLlbbaru1|Kda-LNpi=>u&;~4oMf9e@ZgYxLLuADdV&iL
zn+S$e%RK@=>)&Ocnya__RUi(VGfb>NLLQ^DL{WL7sG}y7jP=riG%VCwk)DKKl<-1>
z8<itCH6UTo6f&b@|FbDg#+g-cP+2BGgB=o-GZ=|9F5yM&vA~WdLqiTQsj6Q5fznC5
zxf{q4wd2!lO-VV{f5Ewt^LwrE33K<T4NvYYm8t6J9h0Wwh~BY$n4gBw;F}t}bltCa
zoh>#Db~mRKr4Mzv>4VsX4Hvf9CG|UV&}N2tL8%t~mz;0~!28#?(M#^Y>v}*J;iS*k
zu99vyHJ5WN_O`NrKo8C#=z@iVPAO1lCuC@#p9-Y-)a&$0kd+v!CDi@4bl5g9DK4AD
z+fWx_>|H+Y@oLK}GT~sdEF-l=UVXgb4Q?gZbdUCkdz+H}eTLzsnMUGfy@&aacceDH
zk%U#2HpZ`ofj4MmwpJ2my^k_yAy=&Aoc$!>FsnR$2bhjH@~T5%tFji*X!;W`&{<oV
zo<3;om*nysqC;Ug#`O%?<|mQ&lH_K9z6%<UF(q5RDZ!Dk9(2HbTSbma$-%c1QYYYU
z<9pfL@5QcCUVbYdi{#pYKG)pRqftBqTL^8DoSG)nQ<W$$y*TUeXUUf$`WTPTXVZ+S
zro)Kr(ukm7)a?~HQGOxzsI-@KW8+3Ss@Jm$L{`O4lzkdJD07*dSW;)-1<eQcN;8@i
zz}9E0*Q7c-s+3qFpB^LGy$Kcu(ihXwHRF`b><O+yQX*8pn<8%`C1(V`+($j+Niat?
zgn7rcbfM<)Ka;t<hYZ%cFp<09LMji%@w3VCMX>WXUZkSh4G)MArB;!m#KoSq(4O2N
zN#ngHyej8Ylieq@iY0k-k=RFgut(;>3bR<+Uyx)O$;vu2;)*zmgTUt#9_{MYv^$YG
zwWmw^6V=Gj8Y|Ybjv%J&z-K0H_Nc<kc7Z?Mi@fK(ledu1F5l4mdduMpB9jadw;MnH
zazwx}x^&U|RDAYT6r0#$YzIa?Q<FZ8;#-~eZQAs5#qOLqHW*3ikG2LZ1|v-4Ou|*_
z{9uyss3lqPWRw06Z|8eonaGbIPqvafscNli99Z=2gIvO-^cR>yrTM~P-zkJ&>Tuj)
z31rLN@p)X!s4h3tIzgG>3_K)DgXtg)@?lHz;eiw5o`B@w?49V#_KP>)!U%YWIKoJ`
zLu*cq(Q)rRIsH?GI}8*0K1v95I2-ry@+z-a`6X4E?T1D(2NB-QpQLqIY5n?8gY{i`
zD)tVQf0A&=+w}4+_v|90=#|b+Wvk!#XjkZK7D<+iw*norB&I|7`)euUUwybKV=_t9
z4rRRp;tbnz(PJwXX;i|2h5P5z<H3AOZ!^X~w3edDTqkF6VpA33&b#X&tYnU6IZT-4
zQJqfym^SH56+&N#EesV}^k5-lpor)X)i`1LpzIjx;cRElu!NzyJ?VK-!u^^4^ai9X
zV{n|z!u4fJ5&><_pY8Vx`9`Q7%ec1%at62dG_?x_-lR5&PW7K5a=|l&#cdn_pICL0
z!r_?D=uWTm$=bdJ^v{swwLArDl-N7aKOC=S(g}djks#bk0%w-lFdMgryI0cDB*25@
zT|#YHnOlR+OsRysK27RhPg5x(Q7L*e@DlG&ZK|_)njS#-?C7}c`;aF(ySD(}Gf%X?
zud>%~MtrA>X+FSn$<F}>548(-$Xhwv&41W6tZCfDINsi?S!|29*cFYPR(W{gyDMa&
z%JU^xamHDQ&oi91`2^Y<9CEF~-1r3JBR88iD{jF*J&EF`4KH6k(DSC_a->MgBpd!I
zwaG4{;*paaF}AYebJ@q_xg0eCRMzv77h738kLVSjKLe0o1Ak>)!VNJ2^I4Ytdca@x
zkk_+|9*^Ufu(`y0Ju<uv&gFKI<L+)FX`>#dH*wGy;mm<CT}`LzJ`7<PWs{W%uSTCl
zIb6(D=**tWgQ%^Kk*aF@EgP9j{V{)(x3q=!zP(ef`dA&IWwbCM2?FzPl&VN;7j3hd
zpo6l(Ylo9l?Y22}!;VlI74?#n{~5FY{^w_DLcH4^3K3Su6D|g0XBodSt7zd_#vu4#
zKXFmj*Dq)><9{K#SBK}z?}4g~<)gMTnn||}Pr!A`fVNNgU)_@M*}PrAH-ED1BT%BJ
zV3<2^Itj_STLF9su|5;`9s?de{8&PbU-o_)<g59k`m8C@tjO-iYR&s1e~vxcx&<>{
zK1Mvksc38pIqG3->u+EN2d`)P7&&vFZS#&z)0cOe%Z5|LkwP*Y!<hBdA+004(8D7`
zJ$J->ea2XdYW0Vfz&Aaml5~<3o;dVfa>T`xl*Y0`uacTpg`t`vorErzBx1lv*Vq@*
zwVxdr`et`+Nw$ru6ZzBAryYZxqvyXx2v3nauGVW5>ywVxi>*lc@qYkXupzit_KkA<
z?|xmxLhVFs_-&`JZOGpSI_!3bO_346CByCjWHF}PdQ9=j{Sjph@QNi^@2Za?w5Hz5
zBgTg2W^0g1fcHPZqoV9LH?xI{{*cw&f~VUMb~iFLhHF2<AI41&j5J(o_mSXAl&!HN
zgWL_AKOzu!-udlXTLaX6&Q$G7d`ewcVuH;-Yrq=H0f6XW#FDU7_UoR;gY>B0B{ZMu
z5Dk2~)TW*Q`XH>rdhVrovv&;GG^MmyXKb$EkA)yGZ3$NNXzS6fsr$AB_Dx^#@~fem
z(7@8ECdF4n1y;MQ2M^tUl>yTqQTMSrUd(P{Bn`KRI1-%nnkw{Rj?>C5GyFrnn8E2#
z|0J=F_rXngG955Z!}^REQ(x-Xw;7bP8P!ggvUSI0jFwmyPFgr<5n>IjOx1F0j~f2B
zJ5uoBwY39s(_xy4U1Nr)7OdU;hV{-<AAesfy}=@qb3yyO!qPc#C6%AzEIA(%vY+^W
zsYuRvdsQ8~!Mlm_;$9ejZil5?{X9RL#89_%xz(<`@KG#qMs82FSW$bAmohyF1FAI#
zxa^wsUAhSEW_qwI0Zm8=%asUHuiqn~PcMzqV|O{)H|hL3;xczGTWF0h6>9+}c>jcX
zOKbYh;V@-gg$Pw-(#~Fz8%GBF-F-`#e1CqFn8VKOM-SS$6n?us<F?nu_4eOh3LaiZ
z2Nq2^rLzg!_?As?v42hJgIH8?y!L|Adj4_tJzb}Hw6U&HQ1-9y>hirSvlYJBqB`aK
zz`bIU)bGtfFY<2wwh!lnVO1G_URoO^mtapDb<vud|Jt>aw6k##l?s?t^{B*n7*Hxc
z6l*U+H=rt-g`_2$BvZLGf9xxAIV%PD#pPBpBi38>zR9-a+XmH%ggf(^hf8CC`{s_Q
zx9=|6+y{3v(W976G8#<SCs@9bcCAB-A?*{@y|qwNrgVA4<x#p(qpi}QcmQQ^`@9=v
zgX2te3gNFH5HurkyCJ+i)|y-TFvgoF7zD(s$(bZr081S$!<kbLr!JmA?0uW4sFF>>
zEZf-zk5Xnakz(dZihUn-lRIJggC6!oPm?@Li|(7Wn227{aT5P@;jWt8g;?@^`8RfT
z_A4nIGcO!Xd*9kt`L8fKPPK%1zB*%ED6jMT(LshQlS{M|^H89YVOpB`)e5@AZXbZw
zz=y7()V}$vrv5N{&Rf6UCukx6B3vVUr=?}q>omcYkN?^a1EI*LZTk5{d0r4=jhSQZ
ziN$08n$h=ryJtPi0tPe(4c?n@m}xKd=A_rBr@Rm-)R?XFkPzIR)JM|KbwAktBg$iq
z3({5|4Bk^Dj*qE6J%{%pJ&T}d4<u8OO+&kr;WJFy`IaQ(;)yJuj7gjjso5F}!8Y$P
zjwd<oFE}M=aTNF2f&mU5cVW^g`%$V7Lzdz0v{!59-ijG~8U&CI?(SY<f_u{{O4Dtm
z+6AzRTo3$XV<a`3VbZBiZ~hZW7C#l`<XIZizNr1RCpRZKR~Pr);o-DvYR?hBMSpXX
z#$4Zjstrc`h?d49LE~3V?ct2z?B~HIcZ*vWvFB-LCv7Ci0(HLB=i<o*od@dNGeJG+
zk7FKSdA347%!O0ulgzyIZoI7WZKaz2mZt37+uGlF9$tL2R#ct;K5AL;O+q_JqSP~d
zwB-&sh9O=Dj`K-;24*<%n!h`&LMiL^RsolW8g(4nJYM1k)EuH+@Dkj)k-6q40d7x8
z63$59VLi4rMPhr`vPm7p{&k+gOt_ts56dPu9rhN>*viaT(2idj8pLDOL;J@a7LUbU
zDUCTw`I{7NisaS94<QNdXCO#snD{~k)a&9Ly`vgH)~>~uBDj69=?JkBZJf8PG{Z?E
z@N8SJC3x6(+Yv~0?yeNz`tIAzbom`V?2X~RBLa_EJ?j-IlB^@>FPIm~*Sr?{Y*2>B
z+i<(#4-9x*W88Xe=C}VWM>jR>RQbLZ#<Xq?@Hmc;v0#>|PhQQ;6C9=Z;<orqH6s(`
z5i1~T0Cy-rng0n^@v%8JN#y6_Jj--q?hYS^`}{bgY&`x^ye05NGC)I%h*!#pbZfET
z#3hH-la_?gvlKEEgBNINgg^0GE_c@d5`TN4!S;C|h)~x`OMQL>r=DCIN(9*6+M6B@
zxs`wN6xT)|7i1SWTC!c~hB0Z8iqLmkD@Dr`JHXA<srC4|B<8I@KmC!}KO>vO7_4yC
zV|a35e9U<&s!k0E_#p>vX<8Gtpi)lnaH|}Ldhsbd{91muNPupU6#uw~TQ=LUmg7yS
z(wWs%nm#=1iI>@QNTYE>^xP^NP%w?}A<?etQI+s0)n^NOcB2f=^rfOv3SfnuoL($n
zZ8@1;9+2WWlHn#ZQtFhf5<0a7N?c9O<*hg`OrR!kaTIx&fMdrU?*nN}?7<Mo#sqMg
zgOmgeegU>Go2d)js@tJ!d3E@SKKowNM9Ipb$9yO4{MGY^aSAwn$8WPpkck2vK2Jsl
zfA4cj-Dq*kV8=hSfvNYqEsh}GotHPg9%O6UsKpYch(HdGJ6sfdOAIlUwPe@`d8P`Q
zmYB`&m@9x7j5Tffcr5j1GeL6~h(hK+D4!Xl1GY_?qD7BPhpJqkeKRda<<w@sZYpaC
z_dnb}Z*&`mOFwxstQ4Rzprn<&9bR`3EA3$>moS_p>y^j-=}qo}2;!BRi3euWcp1aR
zjjc~Aon{W!WSK3eoflg%{sC+P4p*N{P4@!43L_Y52O^#QUvQ*8j(Sa{{Q#OOcYxWf
zB~rZB7sY0fw7*|mo?G3x*8E3|D9U-Qfa7A{`1t3k=pM|$T6fWdxlQk2jOg&ebF3M9
z4j+*g@walbH>ut$AH*-)USrGo?4RY1P1~bH-Q!*kR}uwlxN_O8XaXhB*0>iBcrn$+
z!RD<(sxY7=-l(N?gV@O6m7XD;T!H|N#1P?6<;}Y?_)PAj`$Czm`&KVSMbs6EVx${i
z9O{VsPqg?f&8s2i>Zo6f^E+4GMhaDYMrU-^Xu{AOnm!BbI5_Y)NJm~2w*qNTNxYNw
zGZvkYQIJ-9J<(4*VAF6>o)Yh#h<$Lm9a&c;=5p0LX^@XHKc04*ZMGvU@=WU&v|sP^
zKB{u+l)XP<wu@DZLT%VKSlQv|XY{|<T+K^ZdXo<SjOJZSOm=4*Xh>L?gfo^Y$FWMI
z?~GZ>ZgQ{u0*r+tSizV^u${Xpe5sS@FL=tjFhG6Wgkl&mmp+Y8>_3_N^-?3)gtsGO
z!+G_W?U1rcMT-YN$7b-y*~bn%^6EDMP7eSbjZ^jzHeNtgh|LCh&0(Zakg!ubsYdt%
z`W_cvNj2u~M0w)W$12;=>=w`RV)ZZt6PUdT)JTF)0{1t_M-isIjSY{+yEAKcSHf+)
z=b${Hrn(c7`Uh0N)7}T^k$N;S2-TgZhj^DbhZ=`*NOSs?D;|x022}<1PjnFQnn3AK
z<4Q4;x37HGG5)xnaa=<WJn)0Yo>HGyXOP5JXzM2uf8U=40&{FuO(=lji<Y^a5|}7V
zm#Z~3SO<=5ctO112;Mef#C`b&j&d*3s}?%6(QCR$w#L)euYaGiGwR|PuIr5S35sOh
zQs)*gLIM~$S2cET?c5VMR<F@dR#d~3_ElmJ*-hBf*LKh(!-=vDATVm_`?ej$Ip&us
zr%3^Pm3`68-_6gH0hqoA<7X<l8$*IMMP-{KiaKbbat`}87+SR|4DgaQr73!-Ar<V)
z(9%S&K2MY5OEpW1Oa-UKAxlCIt6y<c_iMF`hF3t=>AEVRH~g7jQuUL>RGcfNEx%or
zqZfje^qi`Tawtw3;BNpP^5OSJO`um=Gh>!-P)sZXToFLA<+?qm3lHnZh!t(m^{kn@
zfuZ=*0HY5nrNG)r17O-mSm7rCOiYqFSRnKI_FTFcNWN#eLSy_y`~5S$QfbMsHmj?I
zl2#?X9qpmf2E1v54=9j|7yUh@+c9-S<G<+2yPA4sXJfywmqS4GXRQF$rCQqPh9!pF
ze?8}@dci0r5sO^(r15nR{=@hC;lcpT#+!tqqPNgq%z{~5kU~0iVlr9BKJ`OC%(&yJ
zT9~{TElo(-5#c|n&GGr=15-535=f91iq9M1+<b_tew4f>q0%x$cv$!j(a%pF+g7#+
zNGZ~vsoK^Tc<RTR5)7B7*p8_Mx}@|dUW?2n3)+WyD&E5<CLlFnx)?BesV(2y^X&A7
z)vV%R$g1!IPQW6khntvyVZI*7-+ZM*-&4AuU_@beEg{d_*Fb$qvwrf|BRrJ>wXA^A
zPKz#<gUmvcjqD4Iv4@dQ7KS_4q7H8TaX51T`HmN-p4P10bvC@mPrPjf0vgLazYHCg
z&f)~`+{|$INPbuT=~h1Ws&Q)hM)dwzhb_mmgEU-DuB6_Ru}wkYoz>b4x&kd=xr}GY
z_eL8wH#?}HegMHA>n1vQxXbxuorzo0){8)b^X|d7Z64Y5oZyjzE!>Pj;(XXuTXG=E
z=#pj+Kq4fL%k^=^oA~*c;59%->Pp-{O;S`Xy@r0G^y#hqetXI<rk3+dHt+&Q>v}Q(
zYV50Dk$}{Ht<S9()_68^YLGZ1`b_UAwiq<ZOh~HAQ?ySU`Cj!r>(?t#Y9W|R->jX{
zb>Pr4xQu)a@aY9hLgYZoK8P?j2^dcGszTCJb~c1`g(sGk0Qy!{P|2U}L;P)%7M0OE
z9MO5v!2NmP@y?m1($L@=e-3Y>e*K>nkOqPIzPTb3jT=+9R$HI|7Th;0`!nZ@X=y|u
zJ&g@-uRhh;eR8QI@YJHuRB%nlv)MBRGleF5+D7_Xr?kp@W6*xe4(J37U{V~3?egqi
zwwL-#Pf)P5{_TKVhO2=$hLfhZ^zOpoZ`-N*LUo{QUF5D&9g|Bzn;wfg<L_M71cQ!s
zIZNl3`|ULZ=&Fr_ommCdnZ{2NKb4!{8SDi6I&d_Ix}G?^Y2=;iEDcL|b5p3SWbwe<
z7G)hyzlwR0Z(I&03icB>8z<*wEiv1*8Z9;?fPl3vk^VvVHp~)4TceZ&R!!=Em4NV^
zm6jwL<1Tm9;44lXp4cw&S@Vf`cs(kZg_NUu-hFi)WzD<m80q1x<Re2j@p{IyS)XD<
zw2>IRXYwS@J%>OSbm>n^=XYTY;vLzEo}XT&uQV=)0!ugP!rN}#r@b9kgZ1-<54f5r
zDDWs!hV{RPaM$2pA27`={B`<f>)UG8SY5P*fw*7ic6hp-dHQUe9?|DZD&xhP^o^~l
zl}Lpqbu$}E6j#zP=d^8@NM8df5dVROM6ik8=pARdoRG^A_ehLrHoT0%&Y&A6oH1a=
zQyU5tuoqn^j|!dj*iliv6iwi4znf6EHJL$q)=FKMq6l!(Z*ec7PPPUf1#dUk<@)av
z*4#x$T04)T)!&Z^M6%RNwe?8iR5J<A$8WEJgfvXdsgqEveeK&Qc?@aD&AAmruA1g&
zYBOG?v|?Q2k-FIu7$F{|xP`!QQ%|~QgS5_qB>{2(z4)2FVH_+?)bw<|cp+;*h!pGQ
zJrr(tdKisk7#CAfKs~{1`^r21&ZKHNU_T+1xjY<!kPQO^$9t7}K(O8rN7lV(<V9Z=
z)ozE@-9i{xADQ^paDNs}u(c%{eU~Vi?966R=4HZ8KqE7OGw>tkveLaQ!tFMQR563M
zp+I-Rx>!DUMHhfG;5wQ~nP;;$yiOhnXNf=8uxo&ht4a?W9$>DCrOM$20<2Ive^<mm
zP_n<N>bvly+f{3C&$qLSz2g~5;~pml-7!)CYop`dx%B%mdBly4a^^9B!<H#av4%R;
ztPeefv2t)N+@1sF1g5U~fMs-{Nx&FD-|jK(eLJ6M_N%$bli|MUh1KsP%`K0`uC5at
z2T<8y{v}ts@?DhS`Zb2Ho!nKr(kD-COM!Y0H~*o3mc#w-l0y3C0~AI*qmGM`&ux4V
z7(36;y*k~i?oD>*0AvVFw5=Y9BqB7EAJ8@m(zEKh;IvTSBPZHbE5+Y_57`3JD%6FS
zKk_m2?bonG$*C7Tp|^i#eP~pCxb&$kHh3y5eTI0hhWt6O=dK&kQ6~mR<el!4vI!|)
zAk3nJo^-k{1KI{b-*FM@vI%|Gnv|1dc7&^4s_oTYt$ZE8QKis+A=RH{&u>way#F+#
z1wzJQ6^XwD;4wXoE&3=B&#QPpHRXwHVz{mre9upxtB4PH6b7&00Ny&;6%u$sQ}=9t
zHAOPyznZx}XGFa7gy>UY2y#ctw2vmAB+G9yYZStK<EVGqh9~ftpg092KNx54P4=Z7
z|A4Gq_;vPtQr!k(d7YqP%jaoRBGLv@)AYF1^_{ctAM6M3$0CvNlDRzyCr&<zZkNc3
zI`mzytmo6V^tC-C)8__K;J45|ok`OtQmo>fm1lny1E<a7MVa}R41zhy?;xPaBoTk9
z=P4)T^T!7utoNi_*z;gn#ss^-;0U=KrY{bVy$Rs9LZ?;LnBP&(WyENNJH$Q^Po012
zjZyx)F`mCIU9>MN{|`;BWWIy?%~_H?bKks5N2|7^R0T&h*9q<FmYd7Led$1L;CvTB
z;3Z+MDbTRqJuwb%z$gu@nu>I>&S82>A9ajpQNcN?t{XGA!A!Da6$!Z52H%WWWPs+`
z#oUF02@4Q(5@#9%?0hL_wW_4S*n&?ntW8olyNYGFt9d;}Z+K4BeQwh#{CcYc0}hw#
z1J3e<o>SRfkk|S~HyC(dWZ+g?yn&iE^AbSY8H=QhD7RM2x>(zk_M<dFNud91MeHsD
z^42s$e|Q<xpKzf-X@*R5%(x_u88gUX0mlviH|aM_sZ|eqqYQ0J1Lyk)%~=CEmKeqF
z?-8t;V-gB!B`<GHStyH+a%w5n5Gn@cd<)&@N~r^$<}0F#O4%lgjjOK)AmM$<+3kTV
zPXQC3sfdlbk>17jl0c1g_$Rof{<PjRFcae$%aLs2e$m&DPfPKmS>szq;GmxD8QpP)
z(H%{e@!-)5$(KkoT_*f#FTa1(+8Bp>>e2;h@SM}^X%@N7Cfw<HBPUH^L}RzHAh4G+
zrhE&rU4wWIcQG_)@L=-B6t1*bxi&ALIK{kQ^*jHgdq5>}oi`2K^8eZp#AZQlaQu)p
ziwU(%IF>p^sUQ4&V*|4#Gn*vcD$ResBB<Zo;q`VY6$phj$QP%UL!ki^uRh&k81Q=q
z(Df6kF@hu6na~%WCy6(8QWUP<!+1eq#eLoYvYQd@T3D2iqb_JwW<a3?BT0!N1zAiz
zIv%P)^<)EUMbA9__K#hS0r4smB8Lr$nr)b=R{2P9`}PgPKQ+$8nX09QeUVwK+2;X|
z(Tgd1Or|!|?H}bu5O(dz4B*M|LXDyHa<gab>fkoji_V>^KWwo7RKmH=%^kY<?kEw<
zfJkEFVg14Rrb<mR4^AH|Nq7y>`goV(mG2>qfb>Hg?C*(3uP_r7Kyi!nbl<tLsxfcd
zNjI0@$ll`zsZvBQkJHyaA}~8uV|fat^JR6R(g}UzHoTd3vpr6Q6C;j2Yb~o(EF?Uj
zCVH#{Ul7ri0-I%7!dh*~2$bt7t$?D;<nm;8@iK2gn3rffTx6L0u52g?XbQKf7={aU
z)`>`~MQ<7tZ7r%9aISQQB>=^B$~_RT5&j<J$Q4H@6`coCU_G_(zYZj{3Q<fvwt5G0
zS)(sx*AzOjU*jtZktJer$VTprSQ$i-uQ-M&<NJhmkh_M&fv7cui*Lu$qb4DbNJ>GT
zGgOM`Io4b@NmFh=0zpxMM2X3rSLXL_7f?jWqS8lnI7}FBt7HWpP(5QAWgS?zAr^gH
z+Tf}D5w4gK=o=;>%LoE9bgQ1|AL}tr@dT6J59;9+5O^LUcF32<J;m8w+Q1e<4~DyB
z$mk#Xb>&!V?i&dNQ*PoHu9-A;p*Fq^yQYMM?05n9$AnGo=LD!z*LY}w=7>K|th-I0
za~Ev-0TB+emM2;-spqnvJjeOWu=J7!NalO~PQmJ4FS>={Jk6Z%!rw)Dlt0|dbeisI
zdolYwX+!3;s4U=sijc7XkQ^C)?YqYuVnQ`(U_C|LD>@YqHY;Z_MrGdsOl)2LI$}G~
z%L)0m#K%IK=n<<lfJrqBF#MkeUXgpCRIM7K(3Z}yqEkZ!-;Jy}*h}>QNgPrb{`ng>
z?IcOB=nXF|L|<byeN17vwdV)dU~#r*L$#lq1Z0Mjk%(Y*=>&z*JMrp|rU%F;dPEw<
z=eyw}a6XFXIp{8RNN_W1NK9eXDB^}_FL5^v_<+c+l&jh;BhfJ*GOL^u*O%0P%De=(
zI@`so8WO*{sb{P#EqT%;Zp0nhs~S+4aH;)thsUkx1j7oSuFV}MM{itC$5kQY?-0Q&
z210=e`E(@DOG&Q+_~VPcG!9$fTaAOz9p0EJIhTH9pC@UwA;mm%%pt%s!5Tz`@~3lh
z$ll^PtX%DEzCRyru|eWzt!VXrY?co#A>jljHd*f>V*i~B+ua#2mok;1>i5kXu^b=n
zf>IJ=s{kur#aoz~EXpJWZ!88=T88GEvW3ov73v2<jp??gSB>mOOxoE6ZiCnrR3|aV
zEF^{ATBGvx^qk)gUG<?PVI~O{Id+oL{Z@wL;8ADx3PSe%<9j9$M>45U)+fwC_i_M?
t1-_6X6`*nF(FXve{Qu_vcQm>3zkFJx^P(0ueEp3}1sPT8GAXlw{|5A2t+xOG

literal 0
HcmV?d00001

diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index 6b041c117..d471dc358 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -26,6 +26,7 @@ import CohereLogo from "@/media/llmprovider/cohere.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
 import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
+import APIPieLogo from "@/media/llmprovider/apipie.png";
 
 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -50,6 +51,7 @@ import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
 import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
 import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
+import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
 
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -221,6 +223,27 @@ export const AVAILABLE_LLM_PROVIDERS = [
     description: "Run DeepSeek's powerful LLMs.",
     requiredConfig: ["DeepSeekApiKey"],
   },
+  {
+    name: "AWS Bedrock",
+    value: "bedrock",
+    logo: AWSBedrockLogo,
+    options: (settings) => <AWSBedrockLLMOptions settings={settings} />,
+    description: "Run powerful foundation models privately with AWS Bedrock.",
+    requiredConfig: [
+      "AwsBedrockLLMAccessKeyId",
+      "AwsBedrockLLMAccessKey",
+      "AwsBedrockLLMRegion",
+      "AwsBedrockLLMModel",
+    ],
+  },
+  {
+    name: "APIpie",
+    value: "apipie",
+    logo: APIPieLogo,
+    options: (settings) => <ApiPieLLMOptions settings={settings} />,
+    description: "A unified API of AI services from leading providers",
+    requiredConfig: ["ApipieLLMApiKey", "ApipieLLMModelPref"],
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",
@@ -235,19 +258,6 @@ export const AVAILABLE_LLM_PROVIDERS = [
       "GenericOpenAiKey",
     ],
   },
-  {
-    name: "AWS Bedrock",
-    value: "bedrock",
-    logo: AWSBedrockLogo,
-    options: (settings) => <AWSBedrockLLMOptions settings={settings} />,
-    description: "Run powerful foundation models privately with AWS Bedrock.",
-    requiredConfig: [
-      "AwsBedrockLLMAccessKeyId",
-      "AwsBedrockLLMAccessKey",
-      "AwsBedrockLLMRegion",
-      "AwsBedrockLLMModel",
-    ],
-  },
   {
     name: "Native",
     value: "native",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index 39d10e77f..e3b4e2ee8 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -21,6 +21,7 @@ import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
 import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
+import APIPieLogo from "@/media/llmprovider/apipie.png";
 
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import ZillizLogo from "@/media/vectordbs/zilliz.png";
@@ -202,6 +203,13 @@ export const LLM_SELECTION_PRIVACY = {
     description: ["Your model and chat contents are visible to DeepSeek"],
     logo: DeepSeekLogo,
   },
+  apipie: {
+    name: "APIpie.AI",
+    description: [
+      "Your model and chat contents are visible to APIpie in accordance with their terms of service.",
+    ],
+    logo: APIPieLogo,
+  },
 };
 
 export const VECTOR_DB_PRIVACY = {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 81b26f66a..1b69369f5 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -21,6 +21,7 @@ import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
 import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
 import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
+import APIPieLogo from "@/media/llmprovider/apipie.png";
 
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -45,6 +46,7 @@ import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
 import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
 import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
+import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
 
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import System from "@/models/system";
@@ -195,6 +197,13 @@ const LLMS = [
     options: (settings) => <DeepSeekOptions settings={settings} />,
     description: "Run DeepSeek's powerful LLMs.",
   },
+  {
+    name: "APIpie",
+    value: "apipie",
+    logo: APIPieLogo,
+    options: (settings) => <ApiPieLLMOptions settings={settings} />,
+    description: "A unified API of AI services from leading providers",
+  },
   {
     name: "Generic OpenAI",
     value: "generic-openai",
diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
index 97193d5a0..5fd9c8e33 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
@@ -24,6 +24,7 @@ const ENABLED_PROVIDERS = [
   "bedrock",
   "fireworksai",
   "deepseek",
+  "apipie",
   // TODO: More agent support.
   // "cohere",         // Has tool calling and will need to build explicit support
   // "huggingface"     // Can be done but already has issues with no-chat templated. Needs to be tested.
diff --git a/server/.env.example b/server/.env.example
index 80009cfe8..e6a3871d6 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -95,6 +95,10 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
 # COHERE_API_KEY=
 # COHERE_MODEL_PREF='command-r'
 
+# LLM_PROVIDER='apipie'
+# APIPIE_LLM_API_KEY='sk-123abc'
+# APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index e4c0fa9d9..8e3a61767 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -512,6 +512,10 @@ const SystemSettings = {
       // DeepSeek API Keys
       DeepSeekApiKey: !!process.env.DEEPSEEK_API_KEY,
       DeepSeekModelPref: process.env.DEEPSEEK_MODEL_PREF,
+
+      // APIPie LLM API Keys
+      ApipieLLMApiKey: !!process.env.APIPIE_LLM_API_KEY,
+      ApipieLLMModelPref: process.env.APIPIE_LLM_MODEL_PREF,
     };
   },
 
diff --git a/server/storage/models/.gitignore b/server/storage/models/.gitignore
index 6ed579fa3..b78160e79 100644
--- a/server/storage/models/.gitignore
+++ b/server/storage/models/.gitignore
@@ -1,4 +1,5 @@
 Xenova
 downloaded/*
 !downloaded/.placeholder
-openrouter
\ No newline at end of file
+openrouter
+apipie
\ No newline at end of file
diff --git a/server/utils/AiProviders/apipie/index.js b/server/utils/AiProviders/apipie/index.js
new file mode 100644
index 000000000..acfd2b1e6
--- /dev/null
+++ b/server/utils/AiProviders/apipie/index.js
@@ -0,0 +1,336 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const { v4: uuidv4 } = require("uuid");
+const {
+  handleDefaultStreamResponseV2,
+  writeResponseChunk,
+  clientAbortedHandler,
+} = require("../../helpers/chat/responses");
+
+const fs = require("fs");
+const path = require("path");
+const { safeJsonParse } = require("../../http");
+const cacheFolder = path.resolve(
+  process.env.STORAGE_DIR
+    ? path.resolve(process.env.STORAGE_DIR, "models", "apipie")
+    : path.resolve(__dirname, `../../../storage/models/apipie`)
+);
+
+class ApiPieLLM {
+  constructor(embedder = null, modelPreference = null) {
+    if (!process.env.APIPIE_LLM_API_KEY)
+      throw new Error("No ApiPie LLM API key was set.");
+
+    const { OpenAI: OpenAIApi } = require("openai");
+    this.basePath = "https://apipie.ai/v1";
+    this.openai = new OpenAIApi({
+      baseURL: this.basePath,
+      apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
+    });
+    this.model =
+      modelPreference ||
+      process.env.APIPIE_LLM_MODEL_PREF ||
+      "openrouter/mistral-7b-instruct";
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    this.embedder = embedder ?? new NativeEmbedder();
+    this.defaultTemp = 0.7;
+
+    if (!fs.existsSync(cacheFolder))
+      fs.mkdirSync(cacheFolder, { recursive: true });
+    this.cacheModelPath = path.resolve(cacheFolder, "models.json");
+    this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
+  }
+
+  log(text, ...args) {
+    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+  }
+
+  // This checks if the .cached_at file has a timestamp that is more than 1Week (in millis)
+  // from the current date. If it is, then we will refetch the API so that all the models are up
+  // to date.
+  #cacheIsStale() {
+    const MAX_STALE = 6.048e8; // 1 Week in MS
+    if (!fs.existsSync(this.cacheAtPath)) return true;
+    const now = Number(new Date());
+    const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
+    return now - timestampMs > MAX_STALE;
+  }
+
+  // This function fetches the models from the ApiPie API and caches them locally.
+  // We do this because the ApiPie API has a lot of models, and we need the proper token context
+  // window for each model - this is a constructor property, so we can really only get it if this
+  // cache exists. We used to have this as a chore, but given there is an API to get the info,
+  // that makes little sense. This might slow down the first request, but it keeps the context
+  // windows accurate.
+  async #syncModels() {
+    if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
+      return false;
+
+    this.log("Model cache is not present or stale. Fetching from ApiPie API.");
+    await fetchApiPieModels();
+    return true;
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
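+  // Returns the locally cached model map ({ "provider/model": { id, name, organization, maxLength } })
+  // or an empty object when the cache has not been written yet.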
+  models() {
+    if (!fs.existsSync(this.cacheModelPath)) return {};
+    return safeJsonParse(
+      fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
+      {}
+    );
+  }
+
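+  // streamGetChatCompletion is intentionally not implemented (see the commented-out method below),
+  // so this resolves to false and chats fall back to non-streaming completions.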
+  streamingEnabled() {
+    return "streamGetChatCompletion" in this;
+  }
+
+  static promptWindowLimit(modelName) {
+    const cacheModelPath = path.resolve(cacheFolder, "models.json");
+    const availableModels = fs.existsSync(cacheModelPath)
+      ? safeJsonParse(
+          fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
+          {}
+        )
+      : {};
+    return availableModels[modelName]?.maxLength || 4096;
+  }
+
+  promptWindowLimit() {
+    const availableModels = this.models();
+    return availableModels[this.model]?.maxLength || 4096;
+  }
+
+  async isValidChatCompletionModel(model = "") {
+    await this.#syncModels();
+    const availableModels = this.models();
+    return availableModels.hasOwnProperty(model);
+  }
+
+  /**
+   * Generates appropriate content array for a message + attachments.
+   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
+   * @returns {string|object[]}
+   */
+  #generateContent({ userPrompt, attachments = [] }) {
+    if (!attachments.length) {
+      return userPrompt;
+    }
+
+    const content = [{ type: "text", text: userPrompt }];
+    for (let attachment of attachments) {
+      content.push({
+        type: "image_url",
+        image_url: {
+          url: attachment.contentString,
+          detail: "auto",
+        },
+      });
+    }
+    return content.flat();
+  }
+
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+    attachments = [],
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    return [
+      prompt,
+      ...chatHistory,
+      {
+        role: "user",
+        content: this.#generateContent({ userPrompt, attachments }),
+      },
+    ];
+  }
+
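+  // Sends a non-streaming chat completion request through APIpie's OpenAI-compatible endpoint.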
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `ApiPie chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const result = await this.openai.chat.completions
+      .create({
+        model: this.model,
+        messages,
+        temperature,
+      })
+      .catch((e) => {
+        throw new Error(e.message);
+      });
+
+    if (!result.hasOwnProperty("choices") || result.choices.length === 0)
+      return null;
+    return result.choices[0].message.content;
+  }
+
+  // APIPie says it supports streaming, but it does not work across all models and providers.
+  // Notably, it is not working for OpenRouter models at all.
+  // async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+  //   if (!(await this.isValidChatCompletionModel(this.model)))
+  //     throw new Error(
+  //       `ApiPie chat: ${this.model} is not valid for chat completion!`
+  //     );
+
+  //   const streamRequest = await this.openai.chat.completions.create({
+  //     model: this.model,
+  //     stream: true,
+  //     messages,
+  //     temperature,
+  //   });
+  //   return streamRequest;
+  // }
+
+  handleStream(response, stream, responseProps) {
+    const { uuid = uuidv4(), sources = [] } = responseProps;
+
+    return new Promise(async (resolve) => {
+      let fullText = "";
+
+      // Establish listener to early-abort a streaming response
+      // in case things go sideways or the user does not like the response.
+      // We preserve the generated text but continue as if chat was completed
+      // to preserve previously generated content.
+      const handleAbort = () => clientAbortedHandler(resolve, fullText);
+      response.on("close", handleAbort);
+
+      try {
+        for await (const chunk of stream) {
+          const message = chunk?.choices?.[0];
+          const token = message?.delta?.content;
+
+          if (token) {
+            fullText += token;
+            writeResponseChunk(response, {
+              uuid,
+              sources: [],
+              type: "textResponseChunk",
+              textResponse: token,
+              close: false,
+              error: false,
+            });
+          }
+
+          if (message === undefined || message.finish_reason !== null) {
+            writeResponseChunk(response, {
+              uuid,
+              sources,
+              type: "textResponseChunk",
+              textResponse: "",
+              close: true,
+              error: false,
+            });
+            response.removeListener("close", handleAbort);
+            resolve(fullText);
+          }
+        }
+      } catch (e) {
+        writeResponseChunk(response, {
+          uuid,
+          sources,
+          type: "abort",
+          textResponse: null,
+          close: true,
+          error: e.message,
+        });
+        response.removeListener("close", handleAbort);
+        resolve(fullText);
+      }
+    });
+  }
+
+  // handleStream(response, stream, responseProps) {
+  //   return handleDefaultStreamResponseV2(response, stream, responseProps);
+  // }
+
+  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+async function fetchApiPieModels(providedApiKey = null) {
+  const apiKey = providedApiKey || process.env.APIPIE_LLM_API_KEY || null;
+  return await fetch(`https://apipie.ai/v1/models`, {
+    method: "GET",
+    headers: {
+      "Content-Type": "application/json",
+      ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),
+    },
+  })
+    .then((res) => res.json())
+    .then(({ data = [] }) => {
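+      // Key each model as "provider/model" so ids line up with APIPIE_LLM_MODEL_PREF values
+      // like "openrouter/llama-3.1-8b-instruct".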
+      const models = {};
+      data.forEach((model) => {
+        models[`${model.provider}/${model.model}`] = {
+          id: `${model.provider}/${model.model}`,
+          name: `${model.provider}/${model.model}`,
+          organization: model.provider,
+          maxLength: model.max_tokens,
+        };
+      });
+
+      // Cache all response information
+      if (!fs.existsSync(cacheFolder))
+        fs.mkdirSync(cacheFolder, { recursive: true });
+      fs.writeFileSync(
+        path.resolve(cacheFolder, "models.json"),
+        JSON.stringify(models),
+        {
+          encoding: "utf-8",
+        }
+      );
+      fs.writeFileSync(
+        path.resolve(cacheFolder, ".cached_at"),
+        String(Number(new Date())),
+        {
+          encoding: "utf-8",
+        }
+      );
+
+      return models;
+    })
+    .catch((e) => {
+      console.error(e);
+      return {};
+    });
+}
+
+module.exports = {
+  ApiPieLLM,
+  fetchApiPieModels,
+};
diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js
index 1d356f00a..cabedb7f8 100644
--- a/server/utils/agents/aibitat/index.js
+++ b/server/utils/agents/aibitat/index.js
@@ -785,6 +785,8 @@ ${this.getHistory({ to: route.to })
         return new Providers.FireworksAIProvider({ model: config.model });
       case "deepseek":
         return new Providers.DeepSeekProvider({ model: config.model });
+      case "apipie":
+        return new Providers.ApiPieProvider({ model: config.model });
 
       default:
         throw new Error(
diff --git a/server/utils/agents/aibitat/providers/ai-provider.js b/server/utils/agents/aibitat/providers/ai-provider.js
index 3a144ec6c..5e64e8f26 100644
--- a/server/utils/agents/aibitat/providers/ai-provider.js
+++ b/server/utils/agents/aibitat/providers/ai-provider.js
@@ -182,6 +182,14 @@ class Provider {
           apiKey: process.env.DEEPSEEK_API_KEY ?? null,
           ...config,
         });
+      case "apipie":
+        return new ChatOpenAI({
+          configuration: {
+            baseURL: "https://apipie.ai/v1",
+          },
+          apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
+          ...config,
+        });
       default:
         throw new Error(`Unsupported provider ${provider} for this task.`);
     }
diff --git a/server/utils/agents/aibitat/providers/apipie.js b/server/utils/agents/aibitat/providers/apipie.js
new file mode 100644
index 000000000..4c6a3c8bf
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/apipie.js
@@ -0,0 +1,116 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+/**
+ * The agent provider for the APIpie provider.
+ */
+class ApiPieProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(config = {}) {
+    const { model = "openrouter/llama-3.1-8b-instruct" } = config;
+    super();
+    const client = new OpenAI({
+      baseURL: "https://apipie.ai/v1",
+      apiKey: process.env.APIPIE_LLM_API_KEY,
+      maxRetries: 3,
+    });
+
+    this._client = client;
+    this.model = model;
+    this.verbose = true;
+  }
+
+  get client() {
+    return this._client;
+  }
+
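+  // Zero-temperature, non-streamed completion used by UnTooled's functionCall() helper to
+  // detect whether the model wants to invoke a tool.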
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        temperature: 0,
+        messages,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("ApiPie chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("ApiPie chat: No results length!");
+        return result.choices[0].message.content;
+      })
+      .catch((_) => {
+        return null;
+      });
+  }
+
+  /**
+   * Create a completion based on the received messages.
+   *
+   * @param messages A list of messages to send to the API.
+   * @param functions
+   * @returns The completion.
+   */
+  async complete(messages, functions = null) {
+    try {
+      let completion;
+      if (functions?.length > 0) {
+        const { toolCall, text } = await this.functionCall(
+          messages,
+          functions,
+          this.#handleFunctionCallChat.bind(this)
+        );
+
+        if (toolCall !== null) {
+          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+          return {
+            result: null,
+            functionCall: {
+              name: toolCall.name,
+              arguments: toolCall.arguments,
+            },
+            cost: 0,
+          };
+        }
+        completion = { content: text };
+      }
+
+      if (!completion?.content) {
+        this.providerLog(
+          "Will assume chat completion without tool call inputs."
+        );
+        const response = await this.client.chat.completions.create({
+          model: this.model,
+          messages: this.cleanMsgs(messages),
+        });
+        completion = response.choices[0].message;
+      }
+
+      // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
+      // from calling the exact same function over and over in a loop within a single chat exchange
+      // _but_ we should enable it to call previously used tools in a new chat interaction.
+      this.deduplicator.reset("runs");
+      return {
+        result: completion.content,
+        cost: 0,
+      };
+    } catch (error) {
+      throw error;
+    }
+  }
+
+  /**
+   * Get the cost of the completion.
+   *
+   * @param _usage The completion to get the cost for.
+   * @returns The cost of the completion.
+   */
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = ApiPieProvider;
diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index 086e0ccf0..507bf181b 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -15,6 +15,7 @@ const TextWebGenUiProvider = require("./textgenwebui.js");
 const AWSBedrockProvider = require("./bedrock.js");
 const FireworksAIProvider = require("./fireworksai.js");
 const DeepSeekProvider = require("./deepseek.js");
+const ApiPieProvider = require("./apipie.js");
 
 module.exports = {
   OpenAIProvider,
@@ -34,4 +35,5 @@ module.exports = {
   TextWebGenUiProvider,
   AWSBedrockProvider,
   FireworksAIProvider,
+  ApiPieProvider,
 };
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index 3936f9388..ffa65c753 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -166,6 +166,10 @@ class AgentHandler {
         if (!process.env.DEEPSEEK_API_KEY)
           throw new Error("DeepSeek API Key must be provided to use agents.");
         break;
+      case "apipie":
+        if (!process.env.APIPIE_LLM_API_KEY)
+          throw new Error("ApiPie API Key must be provided to use agents.");
+        break;
 
       default:
         throw new Error(
@@ -212,6 +216,8 @@ class AgentHandler {
         return null;
       case "deepseek":
         return "deepseek-chat";
+      case "apipie":
+        return null;
       default:
         return "unknown";
     }
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index f061d35ff..f3430cecc 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -1,4 +1,5 @@
 const { fetchOpenRouterModels } = require("../AiProviders/openRouter");
+const { fetchApiPieModels } = require("../AiProviders/apipie");
 const { perplexityModels } = require("../AiProviders/perplexity");
 const { togetherAiModels } = require("../AiProviders/togetherAi");
 const { fireworksAiModels } = require("../AiProviders/fireworksAi");
@@ -19,6 +20,7 @@ const SUPPORT_CUSTOM_MODELS = [
   "elevenlabs-tts",
   "groq",
   "deepseek",
+  "apipie",
 ];
 
 async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -56,6 +58,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
       return await getGroqAiModels(apiKey);
     case "deepseek":
       return await getDeepSeekModels(apiKey);
+    case "apipie":
+      return await getAPIPieModels(apiKey);
     default:
       return { models: [], error: "Invalid provider for custom models" };
   }
@@ -355,6 +359,21 @@ async function getOpenRouterModels() {
   return { models, error: null };
 }
 
+async function getAPIPieModels(apiKey = null) {
+  const knownModels = await fetchApiPieModels(apiKey);
+  if (Object.keys(knownModels).length === 0)
+    return { models: [], error: null };
+
+  const models = Object.values(knownModels).map((model) => {
+    return {
+      id: model.id,
+      organization: model.organization,
+      name: model.name,
+    };
+  });
+  return { models, error: null };
+}
+
 async function getMistralModels(apiKey = null) {
   const { OpenAI: OpenAIApi } = require("openai");
   const openai = new OpenAIApi({
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 6f2dd79d4..f3f19fb9d 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -162,6 +162,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
     case "deepseek":
       const { DeepSeekLLM } = require("../AiProviders/deepseek");
       return new DeepSeekLLM(embedder, model);
+    case "apipie":
+      const { ApiPieLLM } = require("../AiProviders/apipie");
+      return new ApiPieLLM(embedder, model);
     default:
       throw new Error(
         `ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -285,6 +288,12 @@ function getLLMProviderClass({ provider = null } = {}) {
     case "bedrock":
       const { AWSBedrockLLM } = require("../AiProviders/bedrock");
       return AWSBedrockLLM;
+    case "deepseek":
+      const { DeepSeekLLM } = require("../AiProviders/deepseek");
+      return DeepSeekLLM;
+    case "apipie":
+      const { ApiPieLLM } = require("../AiProviders/apipie");
+      return ApiPieLLM;
     default:
       return null;
   }
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index db5cfe0e3..160e85d44 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -515,6 +515,16 @@ const KEY_MAPPING = {
     envKey: "DEEPSEEK_MODEL_PREF",
     checks: [isNotEmpty],
   },
+
+  // APIPie Options
+  ApipieLLMApiKey: {
+    envKey: "APIPIE_LLM_API_KEY",
+    checks: [isNotEmpty],
+  },
+  ApipieLLMModelPref: {
+    envKey: "APIPIE_LLM_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
 };
 
 function isNotEmpty(input = "") {
@@ -617,6 +627,7 @@ function supportedLLM(input = "") {
     "generic-openai",
     "bedrock",
     "deepseek",
+    "apipie",
   ].includes(input);
   return validSelection ? null : `${input} is not a valid LLM provider.`;
 }
-- 
GitLab