From 2bc11d3f1a6a736d578b5d31af30ec26062b9c3e Mon Sep 17 00:00:00 2001
From: Timothy Carambat <rambat1010@gmail.com>
Date: Tue, 6 Feb 2024 09:17:51 -0800
Subject: [PATCH] Implement support for HuggingFace Inference Endpoints (#680)

---
 docker/.env.example                           |   5 +
 .../LLMSelection/HuggingFaceOptions/index.jsx |  56 ++++++
 .../src/media/llmprovider/huggingface.png     | Bin 0 -> 17572 bytes
 .../GeneralSettings/LLMPreference/index.jsx   |  10 +
 .../Steps/DataHandling/index.jsx              |   8 +
 .../Steps/LLMPreference/index.jsx             |  10 +
 server/.env.example                           |   5 +
 server/models/systemSettings.js               |  14 ++
 server/utils/AiProviders/huggingface/index.js | 185 ++++++++++++++++++
 server/utils/chats/stream.js                  | 106 ++++++++++
 server/utils/helpers/index.js                 |   3 +
 server/utils/helpers/updateENV.js             |  23 ++-
 12 files changed, 424 insertions(+), 1 deletion(-)
 create mode 100644 frontend/src/components/LLMSelection/HuggingFaceOptions/index.jsx
 create mode 100644 frontend/src/media/llmprovider/huggingface.png
 create mode 100644 server/utils/AiProviders/huggingface/index.js

diff --git a/docker/.env.example b/docker/.env.example
index 4213c3ff5..b7674e91b 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -48,6 +48,11 @@ GID='1000'
 # MISTRAL_API_KEY='example-mistral-ai-api-key'
 # MISTRAL_MODEL_PREF='mistral-tiny'
 
+# LLM_PROVIDER='huggingface'
+# HUGGING_FACE_LLM_ENDPOINT=https://uuid-here.us-east-1.aws.endpoints.huggingface.cloud
+# HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
+# HUGGING_FACE_LLM_TOKEN_LIMIT=8000
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
diff --git a/frontend/src/components/LLMSelection/HuggingFaceOptions/index.jsx b/frontend/src/components/LLMSelection/HuggingFaceOptions/index.jsx
new file mode 100644
index 000000000..7e8747da1
--- /dev/null
+++ b/frontend/src/components/LLMSelection/HuggingFaceOptions/index.jsx
@@ -0,0 +1,56 @@
+export default function HuggingFaceOptions({ settings }) {
+  return (
+    <div className="w-full flex flex-col">
+      <div className="w-full flex items-center gap-4">
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-4">
+            HuggingFace Inference Endpoint
+          </label>
+          <input
+            type="url"
+            name="HuggingFaceLLMEndpoint"
+            className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
+            placeholder="https://example.endpoints.huggingface.cloud"
+            defaultValue={settings?.HuggingFaceLLMEndpoint}
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+          />
+        </div>
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-4">
+            HuggingFace Access Token
+          </label>
+          <input
+            type="password"
+            name="HuggingFaceLLMAccessToken"
+            className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
+            placeholder="HuggingFace Access Token"
+            defaultValue={
+              settings?.HuggingFaceLLMAccessToken ? "*".repeat(20) : ""
+            }
+            required={true}
+            autoComplete="off"
+            spellCheck={false}
+          />
+        </div>
+        <div className="flex flex-col w-60">
+          <label className="text-white text-sm font-semibold block mb-4">
+            Model Token Limit
+          </label>
+          <input
+            type="number"
+            name="HuggingFaceLLMTokenLimit"
+            className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
+            placeholder="4096"
+            min={1}
+            onScroll={(e) => e.target.blur()}
+            defaultValue={settings?.HuggingFaceLLMTokenLimit}
+            required={true}
+            autoComplete="off"
+          />
+        </div>
+      </div>
+    </div>
+  );
+}
diff --git a/frontend/src/media/llmprovider/huggingface.png b/frontend/src/media/llmprovider/huggingface.png
new file mode 100644
index 0000000000000000000000000000000000000000..b1cdac08609e581c136aa60d2eaca1df4eec741e
GIT binary patch
literal 17572
zcmeIaWmH_j(kP0%1$RP%1s&WWK=9!1OmGGn+}#q~U6bJM?g4@f5ZpCDaJRwU<b2;f
z_ndRzdq3WNKkuxy*X*gTuCA`CUAwxgYrd!|%VJ}YW5B_|Vav;XP=|wqSNMCOp}-(M
z?}(9MU+7>tJr_7QoF9KL_!Jf#3K&qxRa)0o%F*85!od~J(^<$|=!1lelf4%&r;~%0
z5GNn2tq_+WpCBg}4=+D}pNEs5pNogpRfvlRzy;vr<n?K68-#;Hy0X#Kb=6h+C}ien
z&ki(q{A9uIX%B{xhl3OG6oTE_Tet#Wd)nJMxCnWQGX4WY2zLJ$%)uyz@%q0%BF^TP
zLh2u6{u>qcOO(;t)fFtn!QtWI!S2D$?&xgA!6_&x$N}Ku;NoI~VX(P)Ik*Bn*&JNn
z!YKVasSg$|X3jQXR~tu%*MCU^KRLR&iZU|(6YIZNGB^9@Fkm-lyMGK~ZpL9@XYp4o
zE*zZfod3q9xtWlwjjNr-e=sg#=lUNIJDa~V5wZh1Scx)vvYA_00^RIf8O5|5{?Xt3
zwYi0hjg`Y|w%68fR#r9+R+d0B3lWaLtlHW93$1@Z{+&F>{|yuWK>L?D|9=#RgsZEw
z&8L5g#KDCf_z#HNUpcbFRLlW1|0@*0zX|2PdI?FGyZj5|e`lg$;q)I6m{|TcfPu~~
z7MfmQ3sFXO3l~Q>XETd`wT1sCDCKMcbhR)S;{tH;umQN(0DK}G|IzeMb&C9VgS4ls
zyt$YF(BcyxA2&A}p9L2;8;_ZVAloN19!oY0K65i5z|zbD$Sd$qTL0DMf1xDv2}X#Q
zi<gU!SCA8C{(`&$fPeP+N8|s}MbpvE%=+)N#JK*6`9ER*6aD`|&Hpa0|1s5n7wi8|
ziU$A7^YvHD|9Ei!>KE1k(=YeG=@$kQl5lgic61igw6T9J1#|}5e*(IS{6+pB4gWE@
z|9w-0xsHFc!+&z<2?YNy76?NY`iHeRI%_&Q+KEX49o&H;9RCUae`E50IHLc+fw`oA
zF=38>x->DFaZ({TIB`7r4-%T5*~hCsuGt=brw_N>+?fy7GUP^;<!rL4RKg^QD1Ea$
zh1kuBx#rV7MPbx+?o#~O(gxmq$3+ztCzI-YHT%r%Y(_N-{5A46&dTbN6b=KZ2y`2!
zj9n(?qfIAsM&EwPWscr&GX~iyUOwh#Sbfj1>Z)#Rz(3>WR{5PV@mRH<moU2Hj7A%}
zkPK>fe2+#ul7m!;Cz&3fPbi5-fS)Xh29yj|N2C2Hr;0%vijJ;88w#M6co&MtK*tn@
z*BAPKkpER0Rxl9?=-MLSBOX`*@lvg3#C3>God}(6uDc<jOTA|$0d0N<d28uVs}_T&
zI|_w|^*3die-*IoNAR_e9>w&6=_5@<DGBg~XAJkfP_5hTMoei>#VR(o$NL7{t>)4f
zmCtYFRWU%RR;G`Fq&~!h{%8ad&3t&37lZkm%GUlwJY5KxCfK~2$6YLHKsqLA|BPge
zfnF=qyLNz^Yx`-5=XG;$tWW-vF;TX|i~D*4lM8<GmJDVHc*myY0-ID&bB%5AlULuQ
zH=fWsiqY+`Zt+4qQlY9k0eD~j&kII0_0mcEB6YZI*zWjx@+9GWe13)HVgtrdeV)$h
zfuT2S1Yc6>S<zEJxv(Nd;?J7g=p(i~F8X2HtpTBhUyi%7E=a6{R!_r4aZB~w)ZFNp
z%7-_&!CTybE68zI(#Y(&<-!Z?tTc(1RPfY(oz0&YuIxJ&t1;cQDCn~33r$}YYN3HK
zG96QnE!%6{m)a#{4R%|Do^8ZDniV}jUH4tR9fI>az~hE~^Bm`1a5QGBdc1LPD;M<i
z%Tf69Gs{(nBEobpth*)K>o|X`-3iC1Lmw$T5WFLbq5S8jz;)azLUXGlA1WQJI)AxB
zQ+Gn=SGow6GC;51uK8c|LHpMD$;!_6HlEM7&Frs5#jO7z-BSKAc&&=KcwXss>}#l0
z(}FNfP=g=*MX`*&jp(c2*~Lo%aXscxe7<Y&{`T1;>MVYqy0ce)EoLy;XZ}DCit}ip
zpOP@C@y|h1XM{wD7H|2+4#m>r`Hh{tAYBt16InZS^bv(79>fT7EURwSHs1Ik(wTWl
zKSBgWTddkCGJoBx-Wk@9klwE`QV8=Mv8Ygg65|`DyHAB)*tPU@Of0hlLz}XvED#|%
zM>O*7$r-p@o7ZM9pk!?R?}r-)SkZAQmcHBzRc+hCG&L`+1F0~T@?eIXeT$eCvukfW
z3T~m198LLE*p#&yhRpHtyXjHz)5N-8_d#sL(s6);nmT@R%I~Z7UX|ycYj22xNO^|x
zIT6N|ZaauLFm(2Sc7tnGt^6p}cc6tQ_J>@J<ZL7hdO4+@dfeWf;v=7NYwm_Q3Hvr7
zgSa*<j*p1jmjGeQ^-K44+-hh?w2?L%`iL|CBh}sW>3j9<pGZ{K_BC%Y;8D|*SyBc7
zSQC9S@uCwXb`kS_@=a3Oa4_T)rTzrjje)quOJDr>n_6xFcAS2R|HIn>@iZnfSbK72
zpcL*>Jv-MDR*up}+PUm<;a9NNN-n$Ca&>R-(03N&B08o~$PeJ%?Mpt+5py^Pf)N#7
z`4rhv<tI<zAZ8j0(59+oFY+kd<k`xNDU*o>KY8bOpV?W+y4v0h;Ws~$1}OZ#rDF~t
znzi3FVOdrb=y@#uzPX~6l@W%yRTx*UAU^P!Ps&5ov$=pNR5eu!0MIh@SUKy-34lix
zGhg@n@>NdNq1jHK?X~zrSYKZMPS8nKK2W-Cai{03?*legHj9JG{n|?fafu@DS9?Z@
z)*Nm8v(<;ltcEW<?^P&miI3YkfN?2sS1NtJua-ZypTfkW7dx|RiJ!b|K@Y#)Ml$HH
zd*|YME#Wdp7RH=Ca@L=Y$)3ahD41!2+zrK=358S>(bGz_QYC&n>qN!Cl{Z0hKMl-p
zc*7#Yg|vqXwCSoDQq*-|K_971r3?!YMyt2N@bRr6^FHXMC*UJuqzHa~m;Mg?mC+xy
zW77t$X~D;&FBxe^TS2LZ-`L1g&V2=`GTgP1C1W!pfznV=XJMPd1n1UYRzKBXtZ|iJ
zm3NN80Rvq=4HfDTMjSynUTN8y4D~r`Z>$+I@iUIGul`BCSV_k=QmH!jBZeQTa}|#X
zY^Ru9J%E-?#Z;jSzAV=qRk!SK4sBpkWXlQ<-~ewi=%V^n;wLH-rAKs99h@$f)zb96
z+*VEHJ-vll&D6uMe!!K|VI1Z_K{c6gUEs>E4*Ba_O52*}g&@X_;yNEI7CF__DMcC%
zyCEfe${XGgVVf7xFBjX&eO(OKw+^#kx`UJQ8eejM@R~>0X`)c;Xa!c3#q{S|e$SWY
z#puuAOwVe#0rY<>!*N^H$y~<APp0SeBJ4URKeSqwlZ%w>pCSlvekMJ*N!x8I%XaNN
zZsPft3NJ4g8B90x&|}LllB1dwUT0TAN5^!WtW-Yo`-|cnHAz`oJmDdosY>}ZdhQ~(
zEra2La`2QghnTHjxUep&PiA~P;e2L@C^lY2^G(gDN~!z@APd>IhS8t2p1aub^)*$H
z?T{>wGWs`?cIJX;jO>!$@-Lj>sB4di*51<lC*>n@QBydI+-0L?^mOo0cSX6##}4_0
zWzaEa>n!bSvg(Eyq(UnHJ3KlzYNI8OsH2uw-jDT@R_8snKj<45B^D%u>vZ?JO5Eji
zYrkQ>;ihk;+Eh(=XMNmslm1-YGccs+EIca{Tt^CXH;<<VJ|8Q{mi!7^X*TVsjNJmW
ziNwA0m`@oa-$>|1?}?+066=Sr#WT0D6i$7hb$<7!RO9rmks`r?AM;;~lBhU?Q|DWE
zMGI#&N)PqUxC+?o3QJ5Qak-QjKRHY{v-X*7wab9PF;o=c^5DsQAdWg!@9d47D)H8U
zY$e1wV!KAlf3{CX*mmYYL1!zG^#ez+uXkmpV5Ne=zO$F9M}4#+hR+W1%)`jeuYj?0
zmEOnq!w$p4bWD}r&mnWfsUF9{<og$&@k`|2r-iY5GSG+lW70|33~X@E-N2|%>b5Pv
z3LdOzWWVbnux+ob`4Bu6*-z8t{xmML?;JHXQfQhs;L>H$8gGH0+%_ISR%ATW-DS^s
zXF=1vfY9Z&@T)T5X(kH1Kb*K4OCYa$@Uv+1=UeE!k8jOe)w<<ZTPXIx>#k{wohMd0
zrpBuPvfYR4>eB<s1I5Tn-eLwEH~I1@d~^lwMIK5&%@?Su=#Tosb{bkff(iZdNC&sE
z&PYAoP_06CQJY%c#frI>l-3L{l8L-47w4WKyS{<n6<MA<QUd8a<D%~z+HJn*{#Xpk
z8}B*C6pOun_(M}h+iD&^O!}iw&wDAY733JXdd#xymj!i>nw&*r<c%^N%T+sd6zu$!
ztSP{Md9BkkyLVj`!zbm4_cQq_HPY#>A&x^-V>B)f^{07^QngFFbI0zS^qSi#W%Igu
zL7O{rx=P#K`g-iykJ~pN$6GIrmsjiho>2E%U#`)8c~j2Rocb5(#$xx4)-HrRrwbC-
z5(wl}ce$agkB6YE>h)$4$(_Z_><o|dJ(IG%_6Ro3T)pdZ54JPLZc%AE-FvpNYH^CK
zq`286IeiMZ18e=wxB#EXwA1py9b-&d0WXCYXfst%%Rpa`mP{~L-sws)4roek9Yxf5
zq?T6@u(j!zXJ_f#c9xJj=b1V2+)KJzNgUVCM6iz~0v!-Ac>%U;?19^zeb4xdz4j<Z
zI$OR_K<fd8UgUqq*F)#UwU-Raz+4&i8DSEzAnfTdODY*8_gvg<_l}tkUWQ@z5-VDB
zt;SslG2o&#{HrAX>m=U08J)xX6%DnllL}3)$xj7)fHNdISw#<FoYxYq8TEPd>(f*=
zq2POL=meut$ICRNXdB$du($H}mQEup&OY^9cL1~YWV}?@-oyB2zWsy_et|R^=p+wP
zxcNw;=q;S09PIpdkth4U<N3PME!hlo!Tq8+^z(ZsTTg;KZooZJl$Da>H?t?>uDj~S
zCh=2~$7*lOUD#qHDJV7lc}IKgRI4QWrKovggByCy;P>it%6j$r*baH_qlA+Y1S0a(
z53pjNPyXF{d@~<6zmv1dG4rtxvX{qx$?n-n;hSlKfBB@&`Zn(3Av7(dp18{>>ws3r
z->0qn#^}L7?a*&Og9{g36Lb;A{hTQJLJ;w0%phBIUA>i#$3QpGVaLzJV&u-zE31O8
zB1Ik4G}*cMW*>`ENPVeuHzF2ESO48<?TF*^k1nU-7}jc1N~m7O>Aqvtbob4aMww+{
zOAyk|HM?l(i>1v9`k<@LX))Xo)NhRLD5Ga_Y%4L!>}kjE=p-&KMeK*aYsXkfV)=nz
z+1s4>r}eD({Ejtg%){ZW9Wuf+nP$t!Gdr?44faY>EW6&*f`Ag=090|S9Y1f0ISq#o
z!OopCNLaS+?V@5OY!f-mX48fCN>v#qtn1AazmMh~aS}@}o~RxT?X|m+mM8gl8V0d7
zd)6<A+b$ZOmxha)>MD)9I;71a42WcQ#!hy0=o1u6Z_8spD){02;AP%$^up8=q!Zgk
zRH(_}$O&3R)|Km3Ag>byV4-=^;)lw;e>xTEFwpFvC1#%>zSfSMCvNd~r!9QW)fkN*
zF6*8q=ST8Yr9kSQU!#b*H`Tpdf~_RxLvFhK!;<n1la%`7Emi04UT4D#UoxeI>?3`I
zlm~P_Pb4ZHSUD?0%u)DH72J62A^t~m;TNm7o!@9<=9A0>jh5ybch)6@5VyIRndV1#
zOa&|bElD(W`9Ad8YZP;2tF4|k$j~#bPDl_XRNZElPxM|?tTieLA04~zIRm%~B_}In
z<b@EW2mswJ`}EEBYI+TRtD^=NLSYBTtWQsRP^yaWn<KLnQqFH(<Via)$g<ph_4fTa
zVtaq9`54dB(lO0r1<1ZEeTHD44-0nUPI#M5*EF0zUr=G&=x-O=9}j@Ij~0VpGY^6s
zRv$-ybj?q$*u_Ko&b~vxPB&(2o8-HH(Y<R?EWaSfPX<*)tIRF6ESuR#yR}x1>}cvb
zY$<sdR7dT*@3id2(w%xuv!Xhx;Gfxxp?Ix>MguIblsq^#-0jyS*SkSq9&pDmWBJF3
zc;*(!Rb0{>{6>ZaiF~!$*!!cEx(`djc=NQ$akSnKGD$$L_Nk>QPD8UTNsMG^X<IFA
zq+<*m0LJ$>KD+nDr!sQZ?3-WaE$?Pf2L&fDqqMiCB71R-vCe+q_CI>A_8kOrj4q$P
zyUmO)vv^9?VLO{?6FpUOvpMlm3Vtxo%5Qxp*}MxvwY;0|j=NkXx$?Mtoa=P)bY*z1
z*Gp&nly&He@QLqec>*HYLBW7JR!tRP9JAa`_vSsuE%Ww7@3hCE(efe+gpWOHpGKQ|
zIV>_P-)Y-E`Q^djmDqK<S79D(e3EC+WYL_gv`C69=gUWV@$G49oVDDF^m_ADZLHi!
z|MVTAkf-+4mQIh$^JP~Qv9o;!f9H{oo!RBuu{1HEuO!J|2DaKKVPShc(CL8`d|$WL
zsDA&r_xljR^hoasbeIJX>CYPb&OH!D*E6-d+S<I%JrNg+T4jUn@H@mYZ=50#4suQA
z|1`VfH6|_Al}XuMx(K~)+2o+k-JptV*kyC2JhQmJjHo^cqa&Q#=~|nL6qV3hFQG~P
z7D30PGfYaai>Xi@lM)<>Ew8%TrsTPD_GP_&q_Ul#M%HKbSf1L`?>>&-Aw`#NG_T9b
zd0HIzuFm)1NoUb!M=n|<K-K_C31^IH;=Vnxc$o=v)abT0@v)xGo%FMYd==;1Ua|S&
zLsl=Q+4HOZ+kX7Bh4<FEswLXhBJo?Rj%`kqLdIE}j^aFnqmE52lD!uFLu0XfaM#Z<
zAmZV40lNv1P#}_ptz;#|^sAcI$krjFN7NqB8Wkqg#TSaGbN|za;Y-+TfD}jsvhHg=
zc5dZCa&YT!7XE<y{B`s>Rfqm=DS)EOvQ9CJ%E&S==9@V@BzMfW`{==#K1uLx#KyXD
zwTHb4#|OK$by2=bViJ<FO4?KQoI|!sQbV%K$<?;6;)B`);fx}lAZMNrQFn=ByBk|_
zn^J7l{6}{09<QtUNdmd8uBx7{^CbIoZaUq=5{Y+`!>G^_@HMZiq6nXLO=1e;HF1Y)
z+l?gE=VQZNooLZ*J-?^#h`RC7?5lwa78+c;;n5Y+#i{}-f!okwUSky?&_*?Nc6NZ_
zpifof{q5puSP3L$sgf|O>Ea#?Kp$D0_MV^hNU5sOm#Qqo`aTM$zr9i4+8514m4@m5
zqjo0wcCKV7T{EhVH7!^Of=;zM5z#NNI*$9th=kRNsc}X^W6YOfMF){e36FWTK=12^
z4(dj*NJVW5fIK)>=AjJLh~xXc|J;+WfT0_Qz(mn?Tp<JkPcH_gC0B>{^b=KR#tin?
z@*dqv|IWniK+-VRzm>Q&*V6Kx8EHpVf*idpFH;@&4ah-4$&mTjUi0$-&d86z=f)to
z8e~Nq5r@z7Wi*?H@OjWh_O^b$RvuO?$&${7TaDea_Crw*<CmqH`&=7ZW*It|L{d<~
zq_jL#*<%k(4$DtWQoy9uVd!tjij(taQnOfA{m^P>u`pQ#rs{-?+d{a^HX-UojO-X{
z@fBel{w&xS5oIU&Gso&5cqhnTeNCLPRi6BA$IPPJTPDop)=o%G5lN1w<{*+8W&9l=
zlY&}!!e6K}$Buz|VbfJ@G1op40^1|xkyAYY%R^)~-?Ntz<(IdGs5c+7NxaMcsYQnR
zBPHYjM?sIqJq8w5a%A6davJyzFh4}t3V4qXO+`Xdyhk!ERY%N~Q%<jm#cw<FkncDG
zrsWvQgTH>W3FBYu>_J2&_z}fsXiyY;?c=qj8hT2Fo+www_t&0D#JTi+YmFK85_cr<
zlMw<{{2HJ4XwktI9pSJbXON_edI#oo&tQ%=gWAU69nKSL4@qvUv-8HF-a0InF?n(i
z#T5b6V|!>9`C>2op)M;2s{^=CKbbpkH%l^r&aP>~@1eN;lpUhO0p80WrBN(}5MOS#
z@k~60MpDin4?po^OqOYzU8)fhmB3^>`R4j2@OR>x>UF_e2^=bPZB0{s^q;(D8naw7
z2CEu`+L|7)ZJhE@D-mm4U!9=Fo(`XvH-BFg9ye_Ew`{AXutmf0X~g|O^V=o(*1_$n
z#1Q%VIuSV?H`Kx^=KB@h>+NlLe_xTFAS1=UUb^Gt=66%xi2(QM*2|MbdbNLf>TqkH
z0|RkY;*RNJ!<yUFb=5^amUV7Qhk8%JEs?ow$S$fS!4Ui1UH3~-LQxyfsc*lCW1A0f
z)M`Y<`0C<E0pJB{n7;ES?e0Uc7Jf2IW+z?7Sm_1KK)kD25yz>UBj+tK9D*vz@@~#D
zs%OvV<Az#bbLc)_Lh?9Pe07ypBrnFdv)j&r<aO}?ghm`4%J9u<4Nv70L1+n9C%%Nh
z3>AgfAo<d)CpK|y#2sWWM?#eh;?fJTGQqewQk3Z66*O)QNC;_WZ2jg~S~3R`LM%q<
z5DDPwspmiua4+DgpQcjM;-VJiFY?Ga+57R|X1d5jD)EzE(MNpQT4j%GND)tDlx1*&
zF#vAs7gqCX^4E5HC6#3^Du~UeO;!h;*j*@=A`41Vxp0UK!-^qgCD8Q46Q$^$nVL&@
z7c7L=!b_HsqoOR5^7uURcYr7DLF2wamyo(<GteNA(Q4KA7Hq9(T(0`imLtl59r)^3
zxAq`Yxi76&i^0p%)~@i#T2L-C8Y;FC8_SP3T7V2Q*y7CSlo?ln#fpeAyR*<9#$+ux
z<>RN{PF5x<*)crgh5vkx^}P^T$I_1YMg~?N%IWAXT0kZ#uy8PwC}>3t^k0vwtJmw5
zLNBL=@6GBs#$KjQ?T$bLFHzg@F6Q4nLrjId=$byN2s0;h?d-LxOAkzlusCGvTJx0e
zdpj;`svX2?QEw)DaUSNoU|VtxDyMCWv;ZFBHFAG2uVfXnHG<C07%9q=AWqj<EBAxt
z8ng_9P4zG`q}=9ObRK~{X)W$mU@3g(-hVqkTm+C~s3F$E2H?(4)G#L)3E*}!D1LiK
zyN1&xZEc5?78Z2H=Kj*_)peb8VceyGalV9A-P$PGwkku&ut%$fnDi~a&3BvkS^qP?
z<QQtNwCU;xqblu}#)BIYzjo!<5@g*<FH8I7s?8?>)uBL7_`$f{07D^Uv%T4F6YAh_
zWos-6fdO8}4KA?NI({eBXm+Q%QHsTy|HNYx$rT_awpSNlMkRx!K@QS8Nr?sDEA+=9
zWHK9CZ-YrRO3wtp?=3afM?u?obLiavk<eHOa~pZ4KD!00Sh;smS}w)p@R-Umefh*-
zLETs-78NJ;vMij`(5W{e&GfFre>RVI*havM5{2tMsXB3FxZFHNd_Iy1)*Dgpc?KQ0
zW@xII#~762#=HI-_=wPOxQxxh0v0B6Od9t+jjwKEIY#S(<oV}*FYL2GB+p=Bi%C(`
z*eZY|j6}Kb&--%~4>vUBn4X5+%nhhP;g2QeeQ!jb74*xe-FYP=lnESCnx@-pSyZ4H
zPN&jFEuh_Dri6HZhLH8)H7EA)yEEC>cp;{69Z{2bs8>VMq7HYR>*<>+V)NEd-4+DA
zRg|}=^^d3?u_t=`=Gew$DwoC6yN<oWA$reyi9uEa8=qJq;#SzyDlJul+r_Gz?^7ez
zgbeNoNNLlR`Xz~pxoZT9pK(O|Kl4cb#C^6XsSe8yo9BE4+n^7|68Sq)$bURESq2w(
zRvYz38?x)rAJSe3oRj_CK!0HGp$P6gW?s4A3<e~GZ*t1nxWj^VNf2HK)M(5@eCd;7
zLG(I?m8T=h8%%y2gl9E@{%%Ho8p@+9i+fv~$5N26c>OP|fW6^!-k5yI9xhGxR!+CR
zT(dC7TTFmZ#b7?9MPzaXMYwN+%cm2Nofl-2)g52yy|XU4-oNaFYatno4@;wXLQjsn
zFb8&m5Jj^hEV$rE22E>w*itHUCaGnVe(ibq*MxhBP3eP2?E{$l-=Z9iX>{4Mr|(qS
zgnaPcp1+I8WNrRkO6tmxxj9h>_71`0oBpqO^^+6+5H>gbA8~_)$*57pRZ%<>1ipil
zh|Mw5F(v%~hxX3n&yB;%s7ag(szz8U8PTsSr@^*(sfhf~i<)fN0veuj2+@Y&1aE81
z#mYO|{lad$3TqtpeLmSd1BZXYV%bJ{k^sCF&yI_nwMSa?8ml?Ti$0&HY-Qu5bGx(1
z#Tq(@c0aObhc#PR@9xf(51VAXrxmH}&8$#3&A3vUtep2Z{KP{!)jr$iKeNkzB;7R~
za8}{81djtR{KSFJ+czQs&g9B*c9etjUZ)0Di`}D3o4^3Qv|U6EYQs@)$7+*2m&>}I
z#>#RdUcKk)Xk*2>&fi;7+|<t%#^N1ow-nkc30VE5YNYl1GvgWPv(ArRx^0`1_?9w9
zUeBKzDOXN}|8Aax^l=?-sSH_oardgrhJ!Ph48_b&FF80$QV$iLYpz;03A#N}JS*2&
zuZ!Q3`CBA$_wk#VV^c#UrDDjDBgqAyg;mYYkFxkxJnuqfbi1IbblsPhn2PyW;@NpM
z-G%)8=z9!7<fNB<Kk_>#Oro71MC`c9z8P{mkv{LFMFvniqAigZbiHyGY;^g=jyf$B
zCTh|k&Gwc#MOAcV1*`S3{#bS#dbg^=vqx3^sOy*M!-$b*NLD?3!9m)AZOytBSo2cs
z6&7c-diQxLx5=XG&P2BIc%S;be*;Ch>PUSZSp&IA6Opq7_q`PNj3*F?BV!rmnDI<g
z{F(v8G%u%Gb*n2(hl#}l7l-Sx+Ie2F`-!ybV_(ItAiY6pBLNSyjo)6e#Pe?#Y>M!m
z7rm@q@rmRK+xocq;`_bBlj2f#2yAJj5RyXhCaDqJS44{|>=jbu!uTQWwTIC!i+)@y
zD_~L2t$qvUnwaVpo6r~6&UsLGbIZ`F^|J52>}z<RH<Nndv$`_uQd7QMhdTc6hVx!d
zkR0Y4*DLuS>N_~pj2~W}nf5<Tv`PM0Wn=N(j2o&5`~E7(KWmWgm=F|-n4>BjU?zv~
zTYXaP2l+y5Gc1sgNX#EA0zw6%DG^i>+VKuPMq>X?E$|}q9s$1`6*@HruKv*1MVUKJ
zymxsU0ZQ5tT^`7BKL0fvaB5?)>+)hT21Sqm#sWRmY*y$X^FHjcP~RCg`4ANM9@EfU
z-H(9*r(YpVTdPjZ&Al_ta%0O3yqULg;1XY3T6R5p8eFmKxn+*To?Wx&Hs5f9V&;Zm
z?X3oJF!;5Msn)qZFdSw4d{!Ux=)ANMXE|EV+qyPylChDyp39s+Zcb=C$s|^r>*J;-
z$h}b(Q44&kuXp>ARlr!LA1yR)37jiJ{|a5sKeVrT5XIYX+7o1vb3?Y^!{+d#j19m@
zRRp7qkD8$9VW#NG_<?r}&#Moyd)JIMjGA|Uos~s`(<TnZRT{{=L;h&<WM+75-Vb^A
z?Rb`bO$d3g+_Xn_P@$|2QY2s2pZ~E{)BY;jjjhuVZ``=is!0StqDzJ~2g#f7+d;|F
z@Y(x`fY5d^n{&eH5p7WkK@aNZC-`*lTGwi)A&#KY5jbz`?5vQC@`28ipw5kT^EvM^
z5F4wbAe>m#=0zvq(S?d`)SRx^k8Dj8nV)DOlN;e|3ysx=owh7S%EUJ*)tFQ{wm&$G
zWAe~Ts-_!pg*b|tCMPrnUu*YvaAQL!ie|nV&*Ea7y%v_yjG0g7ql4dNb9d7V$|9e|
zkCSXXT@s;#*H>keHcCQ{mhBXSr=ba+x4YEU_9I^tMGxRphj}(SLn!V3#d{Tv+!HWD
zn*nAwNnqAeWyJUPQde5^GTEC-{xTtSt)BaTOz!%+Mz^o=Gl&=?V^Xf?<c*jd8$-m0
zoz#N2RWH6e*9>kBs##&T%6&uOTBZ&=i^9U%8y*as1X9cmQGPlTx@e0xIG2$sy+|{}
zo_se|U9%WF#1Q*mWGWLcVuqe}AY%}PVEbW?e6NN_Uo<3%A?y5Z)1ot;#q`ld2w{(H
z&8maI-Ntx1FX$cQysS&?GohEeav$<U*oG`^O##9$>^EdB7O#x60!8_h(@E6?Dg?i5
zU-VG5|03t794$Gx(f&ccSp-1V>TSNbHDXmEg{4<yPq8`ji24f8`ed8f=_FE?$ulFU
zkVTr<OT|dF2U(t20xb`#-+eVnbT%h{BOPuafZ5Ge?N4k7UP9e_1Q(*P`vBLrK6<fY
zi;hm#BR!~3cJ@Lggrn)Nt=h5-e&rgLIyMm}ui;(sz#%taCA~^t8oBjD@IRxzyY4?2
z?GMVnH$g5N_1mg$dUlIomwbY3G`BsoBerL44hTNc(-+|7laV%tdlMXn_8gJ2o@hk<
zKE@mAql!APQtCt25j|ks47NZ_H8h2LX7zNz8<v#Pe+q5NM`(Q&s)lLsav)`mS~z=}
zv${&|#=>v<!PImGx*f@T{U-aV!|YH;h@UA=1flg!oGp{)3);#`urg!_MCvj*jaaO5
zU_@j0AxE)*wiymW1#nh7g>LHodAAAu2`26vt*QUO)-U<d5-9I7xK5$DSt$%oX$1V5
zpF%D`AZar(4h(yvqegh}^X=qA{9A*R@$4~qrJf#w<y+|RyA}W@As$Zx5|8!GYCZ8#
zRi@IokFlez+_6eEu?%52YXgIU!4-)=;?j0>7n&<!3%gs@cy7HLMx#KH`;|ca<yV@G
zk?A@2WuAujvcjJA@z?z;NOaBscd{vDWx|g*64v}YBmlg1s*qMaEey6i*FOEUDoc!m
z<tx<t>W@;@dCtP>&2)olSIsPKB9nx0tPM~@PScRf=IZ8lOr{OOF-@#-!q&%18^%y)
zRgwsD#3{1$^!lbV{|%>w`}a<_YT?<7UnzZMUY2&17s%iy?hdLlB}$ZfW9dB?mH(_F
zHWJU#R(afujLjPaWG4jX812qOh-yB!E=qzd+>wY!!1FQ3wm{XlKjs=vder3*%pp;9
z(=#Z&2$Ih92v)42i{89qe~yBM9fPW{c+7kKi0ZaCGDC}q7R(Cxde1yC+T*zP#`3d8
zD?M_CP;<OeO@1fh8HI{UEciqT$*q-5%K~cyk#`!SR2}QOxIhIgoeUkH?BSt3s(Ms~
z2KfC^-l(CKHoWir>k`P4ICve=gfQ<O#*j9TK6{;RdpqE|9)~V_=L@3B<kH(Wx=iM`
z&Ux72CmPgPdPNCxGwtzo8yN`w$^s9+_e8njmsH8a&}C814V5mZsfs=b-lHJRBV{*r
z@<dr;ZE#MS2JEjCHhupFFfZVpTdHDMt>6-Vw6iKckk`czcd57ixDvU`*J97lV=<Wv
zwyQhK&1kN%Xp$7*W}40nxYvssWpaL3qSANV2_<09PyL<US>h%G;a$P3;Z}JX*+JH=
zxyyztNN&{L8MusR$tTjfSpg4k?FLdY2p8~u`Y`KxYk8143yz~NFXkOLNDsuA?}dm?
z?(=a>yLaY?8A}(SR;O@Up%m)WV{S9&vtakI9$oc7noIN)s6bXTy4a(h<b7UEuVZyr
zO>p9Cn<_c8!h<A@i^m`lrU#MF*T?NWT0ID7!-bDYQLERwnE)5*jmPZ9vJW}JKodly
zO3IQZ<&i!<h34*>c_y=QtdYRouB9*XjXzsdd&yb~6}#Q;FKZgkGFvScGK#PHd67yH
z2AdGjOPVK{b4k^Q-UvvuW0CO8CkK-|*l>z(-J-}4qFibwA1qmP={#ocUH0G0r_WVz
zlSTFiWz8H!8NM<QWn@FLklQv)z}Ozkub9p*RD|?f*J9@GX&yRH_;-dijbefwj+VYz
zL+%JB+gvWiPia`~`U<DFqJ3ZJ9R#A<)L~Inw|;f`;gWt*8wv@Z$&V9UsroQZ%YNJ^
zmxCT5s>)-q3*ueW3T9qp&*$BiZ~V*>dYl#6^1k;7bQrpQE}ct)ErpB${9a11c+~)F
zw5ew|Np9GNG=1m-;Wkt7Z|2m`mYUsOO)FD9Gp>}lu1Xba3;Clp8&CgP804j+9oadP
zjhBU1Z#$&Zr(m`4<uPk`u1^oMp8!(LesVpM%pFp9C<dC(6T<lN##XYbR4>%;da~j?
z#+H{g2FZ0#&p=QN?Hh8kEC{o#@Lk&=N3S8g(UWrmPOn##?1X!x?rBre!RAZ?Y^W~e
zH<_JeEVN{$5s2-t?fOad8BmJDuRtycuKJHhT4+`%79~_m{*Waz<xV7xR~Sp~EyfP$
zLPnDw^<FwFwz_jxm(9A>*<lIfS_K9M2yMb_hPc_n$fHCH$o!9B|COc8;X!RQy(4E1
zzVf#Ym$T%5rn2RD5CAn5B#CU5HT+fDiiDt2oY-3fVUJ;oFe6;zmnDmz84~E=(TGa%
zq8B8?@lT@yhUU~bSD75@&G@M>JF(${=U%|IVywMd$5WE1kbi;Nk&nl~5%INh#|L_0
zat~0`xayImI`wN(QNGwb({oX^5ePW6eCoM-|2%3)y-<%UWE+`Pqvr$JXQC7a`n`JS
z!j)7)_7AcJJE+P$^jHDW$1-ab!M?hO*&vNIq?MJjgO9uQ#Hm8P+j(IONmWl{NZF*d
z0F=z294C;LN#b;wo2BMY_4o|ZXj}uIv2k_)Pct*Qe@k+hf6~+3TK?`)s{v6sIp?Cq
zE@t0zR{mUUqNm$rH^N>Vk+xU)rZUAzfe6g4Bz-$`M4fdnlVlz5`Jw^%-2{9vmdxp2
zang7mO`HZ>B2}iE%4*gdh*#01<{~f5mU|p$?mM+K$o=XkDr{24-7~JcNr!5Gwy6^2
zv7I5^x}(1YwJ!mxfvx0;{S2dNwiFw?IA=u5v+hz~abj~LO5embi(cWl5w{=;axm4~
z+}#+M-h4ct;PrhK8`C41u=>RiLq9eD_WkqWF2k$?W_@E$l~?24;zwr;sMS@o;2fL&
zVK8E}ksjL@y&_;MY0&ggrRX3<u#r;(Ns!6)K@4s9p;n<gchalNcstvOE+H_9aCzra
zq{#KB^N55(g0ti1vagFc+a3A7&mn;OV84lGfp0gqCL{3D<2N!^z?bJopMgCFoqou7
z#pR=4%S1fF7~E~NsX@6dje1~Jnge+XZR`bmP6WmY-F5Zm_R&8769L$+B4lkg)aGt{
zt{*vR&QS-~l}M0!AEW%enDf^b?cdi^=x(eH{Y{)gEKGaj{reLsyRAdjJqa!VBG#ki
zKHea~7B8!3`P^YOQbXDR7B={tOiZ8388Ddby<_gvPF5@UgWuW6fI>x0yxlr7E#|hL
zS-<zT7j`8c&%QI`Hu%Q+83es4pRyX5K~tLT{ZbCz{9uD%f2@m!>ZGmOb(Szi{SvP%
z@nR#86Il30Uv-V{idX78@Ddcx(HAYB*6kqe8053GbxH@oUiF)!Mmz}NH|asjT-8PZ
z*R6Y_{U8TM4Cv31#W;OTLtRqp`k?&@e6X+^aB|c0?1%R*-}%?XY%l%BLR37{xzj6I
zYA4O|DM5+W#K2J%dk6>;)2ql)I-F}5w4HVW8(6|V*Hq90E75E2!)_|jBg9KbP`~F(
zWQ-ynN51L#{@b#pn<n)BMW;hW17C>+Eyr_DRLxB*ZXakq!}{f1-(oK$WZ>_PNf^zI
zC6XyKmbftDHZcYviJ8;K9G(rlar6;I6@B67-kqS`tb8|Ip3}K%god><X#sC|OG#w+
zA%dD>GT7nHi$2}wjN43Ufyqv<peU+#T4T9^bgZK?4Pns=p;g>{dtDW)nVwd8P}Fyc
zP7<gPMi!})O7EJ|&HY0^_|hLOs{tV*F>5~g3K{5#ii9B&=3Gjx43n`uC=XY)av?lt
za;aR5QCs{iVw^#HS2S6x;W(L=UY;M?xWB~dlIP_?#qRa`q!Pf1s3;9MD48Q3XpQMQ
z*kkg36R96JWE3D$bDrmMT@kC8A;;hI<24@q?(A4edEoY&xlyE9a+UH;0-MlI?1QVJ
zN-k~0G)w{f&0Rk{R(GmTgbWO(g))r_W=?4AhztRa88UZ^3`uc~*EQ|6JsHO(%s)93
zt*Y;35IC(rOG+8eBK)jewzTn_J?|soRl!4R>J8yjCbWOMhqN<q0<V+D-#tzdyetF}
z@5%5c%?bJ?V)Qkp!ClgabX0a0VQjwd-i4pqT6-S89s6*?D(xd$tVz>a&hej+RD>=8
zj0CLG#%k^3PNKeLU69L6APYrB_sTE$WB)_$<!}<qVDM%|V`V0G&Lm~{<VE9VOM+tf
zTBcgyivV^Pm*Fd5f=k<4RAymuru&~p=M{~~)G*<jDq_1?%HK4tSVL7kzVHu&Z(Yi~
zy${_6I1rF$BpQ-@B-~yq*eTkay+MO4F?fbG3y5&MAssoi=={&CG8CIV)YjY;5x*6%
z#u+w`6rr?>Gsw-W2*PTWw)OEyCG|BFinlI$yV=uUANBd^c)E6$4uG<>+g56%dV!mx
zP_{9s_Ur)1?`WcL)J0aanWJ)ZP&%o1tf-qaL?E_N)we5@I;f2)Bk`b1U=-x)C<yw7
z1hi}+h%wziIf-+E5WI}m+XA09eozgyJ_x2#E2;G)M0sC@jPvs0bcx>|S07hDAcku@
zo4Anu+vHMG{bZ&#TNeXlWJBYvt-*pJTdF}?>$}euud`X>;*lCFN}HFLa?irb0eXE#
z!u$G%w2P|-VzlTRIa)(#B>0J=Y661rh>X{gTua|m$=r4@Fy#X+FbAqSQ`V1B7?BG(
zCNh6q7coQ`08SL%r=dD@l6@i{wVHY-;6e#a3ge(>W(@NB)48~0h63$n`UM?PBvyG1
z^QnUf6EW`?!(r+3fb;aVOgR&zo1>sL)y>i9xsSScIl|wUT3v$cv|ae+oyjfj(O6GK
zRtQ5yJH1C4iUv3Ch~%{88gqQ#Oz%pI&5eFHtw;}>%F-<jdxuFB)aUeP+$Gkh@Tc^K
zp(NYc{I6fg%?)MsTTxGrDiJ`<OOKP7V3T@yjooa5WNn4XJmG>HcoMd6v1FZ%R6@MW
zr{8xsf9R#Vy!ZG%TjT!wcoL74&(Vpp7NC7_Q8I8}NXfE_G5Sh?6KR?}iSC@3S92y2
z@rbmmG{l7fYIcPqMBFDRVY71=VW)qLsJuj|+lp+h+-v`ErOYRCsqS?g#r}c4&{mf`
zb31N7SwGJ<i!@qKK-$SFPCwgZm>9IVK&DkfPZK~2b?;~A@U$aeH%9OIG8#`h9}pc9
z8wOTZ4PPp_rhYoCw-GqUkG|`vu{o+?fET59tI_D83+2dGJ!D|fQ<8K$bX8hxxxGDL
zV5D%Mn}R#XICUuQ2k(CM0#$(+SEh;rsN>!>XGV+o4=)uVHsiL@)a+9?<+%joZ|uU%
zXKX`IfDh{uXjfiJY8;5jHo2G-5PtXQMS+3nSZ^8dT<3JCWQ?Q(|1iYQWkq`@M1D1g
znp$5l@Ob@ZyRw{xi)$dSqs3AXVJ~gg{Cmp4A)F7j`wz)W-f@hP@#Y>J%*qkoC~h#6
zF5AeEZ3-l1x|=xJfzW)a0h;4WRF<&KB1s})gRf+Rrl~9fO3i*;ftX|{Drs=E-EB>M
zlYrr>gg3`QM!Yv}_0yb<54!^d0Q*hdb>20mm77;SCxst%E8c0r_pOGBySr;Xg;Eso
zVz}jQp}rV#Pa^Xihp-R;d{#;0ngd6~c8UZs+GNE!+VU5|2ZO^9UY);I-S=VEo~bIi
z#(eU7^~XA>jnz~Za|%0(mQ=A%D{9EnaAJNV{W=MhUC#}BrK~aDEa@aoZAZ*eO=@DY
zM;%`C{SMbe8zcc%m?%A73H)w92*&(-$|LGD*hu>M39Gs%em8ZX-66;@2(x^2SPK`+
zFmOr68cS-$Y67e4RoE9~(!AzonH`oFVkmXf>3vg1eelK5X@tg0J<r7IB)S{#!B8xJ
z2@67N%Eh9a2CL#`V|xo{KP?Rwh_YLC9;JQsij`(`Wuq*tg34!lwT4iO;FFn-f`%!7
z8|uk?1(zl~<y5xBlZA<Ah3ka46B&bQ<=C#ab!<?3j|Me-UFJ}}cBF2IzbbiSJj>Td
z1BH6s4?p8<GU_7}-igu0ppfQ;pOeYy>*{NLUgQRN5pFf7+_*J;b_sSqC~4fb$yzSx
zBwKk0@Qruxok1GgG38QmigCpVXD>@aJzUDCtyvmb>1&JHVTv!tf4Jr{W-78ZlyMmp
z=8u^>?~ZK}x)DeS{gt;bN~ej-DbB+av|hNjEb~EhXrLa-Hp!x*sG6Gdr5`%qbR0}&
zvr$IAHvuWXe+i$>J3$2G1}1D$fTXJUMUkc}C`Pf$hqG+WbxV7UOdC&lqs+YLcbI*T
z%{ik#>o*IDRIPj4?S~hk-Qky1#%grmm5;DVisCKc4k_WhH!$1bhB<GTb2wOQ411ks
z)msN5)(=djCN&8z=zR>zQ0`5^wB2oXYxLD>L@ufgV}Up?>;7n2Ap?>c1Q+-}HO9?%
zSSb--pcZMWC|E5{swd1|kHSQxt^>#wr>jl~WXQMS1ymbyow`RG26N)N9tB%wbL*Kg
zm=&b_aVMHD!Q$(!7({JMXI4vLdKDIhSXLAmTjc(y!~|Oo)8q))eITEq2Oi|aQ;m_x
z$VU1$yp_JCZ+a3|;B^U&SZrYaDl`kn&s#!3lH)MfcUPnK8%Z(d<fA*4X{~}MJr@EX
zJ!n&8Xthdb9fgWh3w~7Lebuk06A!sByx^(v?7*;9+N7|m&zV|7QY)!e%@FTP0`N_l
z_gmqdlZavlq>MN7hxa@NkuEp}dNmoG{8+GeJG&w3!;M6o4=){Nd~BZ<1aYOpmtiy;
z=D*kaj+O>4ZD~LSw)ka|SI>S2%Yu17=*Dz<)0eU>)dzXX`}Yc4KU(?|M$1qbS4X#g
z0`HqYgC1Y~IS%u4LL*XZnE%}4g?TN0cti56Z!st+u?S}L<iY%C4$JSrrAFE9BjX<|
zdz~uXBXS%3W_4^Go$JruC0FRi?v4sqegah!*UmM*`F)S}&_6j{UWP8MAx(^e-ruU1
z-hB38C#d`5Fok3fsc;mroqDF*-wR$Dr|*-|c{P=>dXU#4{6g*FlC4m$&}ShFkDm;K
zY^<SGjRpFN^L=Z(HW3`JMwEMF55t2peeyl{SEdlIOSe1{3#GueSVe3f$B~vPok|wS
zt?Yr>A{Z*G4o^<U?Z60ZWWXG(K4lI*eCe(6K7AJ~T?fA6Vg)^)oCui$XC<gf)Y=PO
zSDZ2Lvbzk+r7O|hA&a^LtJp5I0s-V)#La?TN8N&D!SOEo$@%W^u3Zr5Hp)bQt(xxV
zB^R1j3&3CxshhhT!bi(%sk+vo+<2tVx}Jxz+Wp_h{~SC(isB8?rCt)tq-}WV{G1jZ
z{C!ToeH36}f;WC9d#W9|Cbru6&G~1d)Z4c&`_<g~I<_ikd%3c=Z~gpETB}js`g57f
zR9tR@J9ydK!*5DW+Nbx61lhheP_Lwv1^k;{t3?xmyZaaY(*T4zZ;8M$%geeJj5V-B
zW>;={DyK$z-h)|Ok(bnEeES5R_Bm=Rt=sI#M_RbVJGx)p$~AjT#2AB*6Pl41GoRN-
z?>EieqMZk=l*V>~iM&cOUm@tc{jJ|)ws|ZpjKJNyWxV{Q?i$cR9v?yy$<j*PPG)>5
zz3fn#Edb)O>a-)r5k*5(S-A9VgiKN<C`iDy%+`|C6W`36a7>joad{(djNj7#4j-6x
zf@C1X((6uaM=ZOCs0DKmvP4oO!nb4wpJU#LSh(VLoZ}(o)c^Dq-9w=-sd@a>55B>?
z5iK6VWwS3k@Z!8?Vp(iKWna94U$(8a{1db20jERHc>wfvZ=^1yLJriKUB~E#Mw?0D
zXHzGGV*T)O0v4X@3~Kpi`|P3@__GX7h(CeresXIy87w<2BuJ#-y9o5-x}c0z^jM>J
zt+bu$tR9nF%|BJd<|Dys)Vgk8(?uP1*%w))>FZKo3p9gr(Rxa`VHwApKWn%#m;zj^
zG330X;;nFIQgd;cvzs@)IW!zcpZ{QJib|l@@E}eE;ElhS?Hj`iVRju#c(aKz>t6}@
zd}=@KGgcDL7e0vYe)|nYd&_oo_Xpv3yCZ#*>hqm4BC<o4RZw!q0mcaL+lUznb8h<Q
zjp{H<Yve6A+jhOeVdPC`guxsQgER63nLjpRnHVdrW_Q3|RBrSZA^&5iroxSwGHvry
zse?`<=`wC638ky-ClN}o4=>h1p9b1{y0Ne!uycsG4)U`-6*MR)3NP@isK@Q5vMn8P
zO}riPpujI`a$dlSpF1LLD`E?mfR*RZ+r){7WoxjLv+}+B-63gWU2Z|GML@<oo!8_c
zH6(RAszTxjNs0iGqVDvScPr&qG>tPsz-_YmFlTAtFs8=08p_ZwEQMr_E76<bxgNb#
zaYAKij(^XyRv_=zs-!yf4MmG1xK(asM(9!o`vZz*@7D{>d3t8BSX)d#e1o&&EjbKk
znw&%%q@&Nfa^jU>#-28dM=$@{e<@v|!8WLmd69}M>^&Sugr7V=S_Is1B1%u<u)AGl
z`o)J9*4v0yMPFi%1WP%SnUd9IDkd4a#a~M>7dF{W!A%p~RFxk3RYRvTgsc54C}|Cu
zOek$V%JbTu%Ia=5ahK<%&cbJEzWAW@HR8nv`H(rgcuT7ozUo0G!>#xZ1Y)R<zBlKk
zjF>sKYe!x-U4PJ$)3zb;;l@wCj}|Z1&mf@*`IwQ{iC>}9Zpc>$KxZ*HlkmNNX>n@(
zsUVpX-wG)4CzhqnQDuP@UA|6Rd&FZ-Lk%uB5_A0M=jn%iur77nedqMx=zBC~bC)Je
zYqWK)l_q<<wDfc2E5?BHyf^NZD3;N!)+${LJGuguBKPEr1yN>U?z?JDim>1A6fh(9
za@_B)!$;1Xy)!$N(rO$lR>e=&R-z2QG|}#DC0ZSE-a{|tbrYspI1yf4Sn;|W_q#ww
zwLPlcW`m^)C;h7%sM+H4SvC2vLncZb?h6^q>u1#iSROC7ZS88ZE9qu(U!JS&ZgcEJ
z`zvG37w%uXioZ6CLr%<ZkYXs5blEsRKvU|kY+LjYX*<6&Z54X4xZ96t@+Vu+HlT?$
zx((_E!b&f<<W-m0lGKF*VD(3vtwxG53Wcyb!jp;A02XI%Meeh$Z?H<s8xr|N<y^d@
zJ0hZJD#rwHLk5yXDQqH}rzK~UK26RBSyYbUPc+Ws!`?1?TDrMUzW3GE3!gW&6$|un
z%FYMpLQ-bissZ@P2Jf=8dj)SQpsi3tFAu~{HD2zv+o$myGNVj2pcnMiv@4+f)Y2t4
zUZ}lmpNN?<%jEcO%5n#G=Di()TXL2yf;Bxfy97DTR>Jj=*t5N@yf$@%L@U$in2n|7
zuB<^&U_I3u{I82Qm9M)8niBF^WlPbo3vxs&iIpevbb>NTYnl&FV#;uVPxg~{7}vmx
znOfQQLWY^Nxf!)<X+EcgwR6jU>`ezZQe$&B9km7(x8k>}H=VIHVMrE*@=7In18PLs
z8OhMwBNSmn!(31l8Iq1iU1<BPc;TTXWa(6Z^DHW0SG)YS;+RK&Vm;q~2v)g^-ca(w
zF{yqn&A<#Reu7m>gLj=8VO7#^|4(W3|6ZZ^`3qvTgd!vi;e7S)KY++fD}ShxG!6d0
E03ZHD?*IS*

literal 0
HcmV?d00001

diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index 1efa818d3..ac9ab71f8 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -13,6 +13,7 @@ import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
 import TogetherAILogo from "@/media/llmprovider/togetherai.png";
 import MistralLogo from "@/media/llmprovider/mistral.jpeg";
+import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
@@ -24,6 +25,7 @@ import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
 import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
 import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
 import MistralOptions from "@/components/LLMSelection/MistralOptions";
+import HuggingFaceOptions from "@/components/LLMSelection/HuggingFaceOptions";
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { MagnifyingGlass } from "@phosphor-icons/react";
 
@@ -107,6 +109,14 @@ export default function GeneralLLMPreference() {
       options: <GeminiLLMOptions settings={settings} />,
       description: "Google's largest and most capable AI model",
     },
+    {
+      name: "HuggingFace",
+      value: "huggingface",
+      logo: HuggingFaceLogo,
+      options: <HuggingFaceOptions settings={settings} />,
+      description:
+        "Access 150,000+ open-source LLMs and the world's AI community",
+    },
     {
       name: "Ollama",
       value: "ollama",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index 60a3b6da4..c86a62a43 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -10,6 +10,7 @@ import TogetherAILogo from "@/media/llmprovider/togetherai.png";
 import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
 import MistralLogo from "@/media/llmprovider/mistral.jpeg";
+import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
 import ZillizLogo from "@/media/vectordbs/zilliz.png";
 import AstraDBLogo from "@/media/vectordbs/astraDB.png";
 import ChromaLogo from "@/media/vectordbs/chroma.png";
@@ -101,6 +102,13 @@ const LLM_SELECTION_PRIVACY = {
     ],
     logo: MistralLogo,
   },
+  huggingface: {
+    name: "HuggingFace",
+    description: [
+      "Your prompts and document text used in response are sent to your HuggingFace managed endpoint",
+    ],
+    logo: HuggingFaceLogo,
+  },
 };
 
 const VECTOR_DB_PRIVACY = {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 9e8ab84a9..6970dfa1f 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -10,6 +10,7 @@ import LocalAiLogo from "@/media/llmprovider/localai.png";
 import TogetherAILogo from "@/media/llmprovider/togetherai.png";
 import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
 import MistralLogo from "@/media/llmprovider/mistral.jpeg";
+import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
 import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
@@ -19,6 +20,7 @@ import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
 import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
 import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
 import MistralOptions from "@/components/LLMSelection/MistralOptions";
+import HuggingFaceOptions from "@/components/LLMSelection/HuggingFaceOptions";
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import System from "@/models/system";
 import paths from "@/utils/paths";
@@ -82,6 +84,14 @@ export default function LLMPreference({
       options: <GeminiLLMOptions settings={settings} />,
       description: "Google's largest and most capable AI model",
     },
+    {
+      name: "HuggingFace",
+      value: "huggingface",
+      logo: HuggingFaceLogo,
+      options: <HuggingFaceOptions settings={settings} />,
+      description:
+        "Access 150,000+ open-source LLMs and the world's AI community",
+    },
     {
       name: "Ollama",
       value: "ollama",
diff --git a/server/.env.example b/server/.env.example
index 96cbd58ae..ec6abcac9 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -45,6 +45,11 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
 # MISTRAL_API_KEY='example-mistral-ai-api-key'
 # MISTRAL_MODEL_PREF='mistral-tiny'
 
+# LLM_PROVIDER='huggingface'
+# HUGGING_FACE_LLM_ENDPOINT=https://uuid-here.us-east-1.aws.endpoints.huggingface.cloud
+# HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
+# HUGGING_FACE_LLM_TOKEN_LIMIT=8000
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index b8c46524c..abb930127 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -194,6 +194,20 @@ const SystemSettings = {
             AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
           }
         : {}),
+
+      ...(llmProvider === "huggingface"
+        ? {
+            HuggingFaceLLMEndpoint: process.env.HUGGING_FACE_LLM_ENDPOINT,
+            HuggingFaceLLMAccessToken: !!process.env.HUGGING_FACE_LLM_API_KEY,
+            HuggingFaceLLMTokenLimit: process.env.HUGGING_FACE_LLM_TOKEN_LIMIT,
+
+            // For embedding credentials when HuggingFace is selected.
+            OpenAiKey: !!process.env.OPEN_AI_KEY,
+            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
+            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
+            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
+          }
+        : {}),
     };
   },
 
diff --git a/server/utils/AiProviders/huggingface/index.js b/server/utils/AiProviders/huggingface/index.js
new file mode 100644
index 000000000..4faf9b30f
--- /dev/null
+++ b/server/utils/AiProviders/huggingface/index.js
@@ -0,0 +1,185 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const { OpenAiEmbedder } = require("../../EmbeddingEngines/openAi");
+const { chatPrompt } = require("../../chats");
+
+class HuggingFaceLLM {
+  constructor(embedder = null, _modelPreference = null) {
+    const { Configuration, OpenAIApi } = require("openai");
+    if (!process.env.HUGGING_FACE_LLM_ENDPOINT)
+      throw new Error("No HuggingFace Inference Endpoint was set.");
+    if (!process.env.HUGGING_FACE_LLM_API_KEY)
+      throw new Error("No HuggingFace Access Token was set.");
+
+    const config = new Configuration({
+      basePath: `${process.env.HUGGING_FACE_LLM_ENDPOINT}/v1`,
+      apiKey: process.env.HUGGING_FACE_LLM_API_KEY,
+    });
+    this.openai = new OpenAIApi(config);
+    // When using HF inference server - the model param is not required so
+    // we can stub it here. HF Endpoints can only run one model at a time.
+    // We set to 'tgi' so that endpoint for HF can accept message format
+    this.model = "tgi";
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    if (!embedder)
+      console.warn(
+        "No embedding provider defined for HuggingFaceLLM - falling back to Native for embedding!"
+      );
+    this.embedder = !embedder ? new OpenAiEmbedder() : new NativeEmbedder();
+    this.defaultTemp = 0.2;
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
+  streamingEnabled() {
+    return "streamChat" in this && "streamGetChatCompletion" in this;
+  }
+
+  promptWindowLimit() {
+    const limit = process.env.HUGGING_FACE_LLM_TOKEN_LIMIT || 4096;
+    if (!limit || isNaN(Number(limit)))
+      throw new Error("No HuggingFace token context limit was set.");
+    return Number(limit);
+  }
+
+  async isValidChatCompletionModel(_ = "") {
+    return true;
+  }
+
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+  }) {
+    // System prompt it not enabled for HF model chats
+    const prompt = {
+      role: "user",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    const assistantResponse = {
+      role: "assistant",
+      content: "Okay, I will follow those instructions",
+    };
+    return [
+      prompt,
+      assistantResponse,
+      ...chatHistory,
+      { role: "user", content: userPrompt },
+    ];
+  }
+
+  async isSafe(_input = "") {
+    // Not implemented so must be stubbed
+    return { safe: true, reasons: [] };
+  }
+
+  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+    const textResponse = await this.openai
+      .createChatCompletion({
+        model: this.model,
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
+        n: 1,
+        messages: await this.compressMessages(
+          {
+            systemPrompt: chatPrompt(workspace),
+            userPrompt: prompt,
+            chatHistory,
+          },
+          rawHistory
+        ),
+      })
+      .then((json) => {
+        const res = json.data;
+        if (!res.hasOwnProperty("choices"))
+          throw new Error("HuggingFace chat: No results!");
+        if (res.choices.length === 0)
+          throw new Error("HuggingFace chat: No results length!");
+        return res.choices[0].message.content;
+      })
+      .catch((error) => {
+        throw new Error(
+          `HuggingFace::createChatCompletion failed with: ${error.message}`
+        );
+      });
+
+    return textResponse;
+  }
+
+  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+    const streamRequest = await this.openai.createChatCompletion(
+      {
+        model: this.model,
+        stream: true,
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
+        n: 1,
+        messages: await this.compressMessages(
+          {
+            systemPrompt: chatPrompt(workspace),
+            userPrompt: prompt,
+            chatHistory,
+          },
+          rawHistory
+        ),
+      },
+      { responseType: "stream" }
+    );
+    return { type: "huggingFaceStream", stream: streamRequest };
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
+    const { data } = await this.openai.createChatCompletion({
+      model: this.model,
+      messages,
+      temperature,
+    });
+
+    if (!data.hasOwnProperty("choices")) return null;
+    return data.choices[0].message.content;
+  }
+
+  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+    const streamRequest = await this.openai.createChatCompletion(
+      {
+        model: this.model,
+        stream: true,
+        messages,
+        temperature,
+      },
+      { responseType: "stream" }
+    );
+    return { type: "huggingFaceStream", stream: streamRequest };
+  }
+
+  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+module.exports = {
+  HuggingFaceLLM,
+};
diff --git a/server/utils/chats/stream.js b/server/utils/chats/stream.js
index 1202ab167..0ee448a5e 100644
--- a/server/utils/chats/stream.js
+++ b/server/utils/chats/stream.js
@@ -383,6 +383,112 @@ function handleStreamResponses(response, stream, responseProps) {
     });
   }
 
+  // Handle SSE output from a HuggingFace TGI endpoint (OpenAI-compatible).
+  // Chunks arrive as "data: {json}" lines but a JSON object may be split
+  // across TCP chunks, so partial payloads are stitched together in `chunk`.
+  if (stream.type === "huggingFaceStream") {
+    return new Promise((resolve) => {
+      let fullText = "";
+      let chunk = "";
+      stream.stream.data.on("data", (data) => {
+        const lines = data
+          ?.toString()
+          ?.split("\n")
+          .filter((line) => line.trim() !== "");
+
+        for (const line of lines) {
+          let validJSON = false;
+          // Strip the SSE "data:" prefix and prepend any buffered partial JSON.
+          const message = chunk + line.replace(/^data:/, "");
+          if (message !== "[DONE]") {
+            // JSON chunk is incomplete and has not ended yet
+            // so we need to stitch it together. You would think JSON
+            // chunks would only come complete - but they don't!
+            try {
+              JSON.parse(message);
+              validJSON = true;
+            } catch {
+              console.log("Failed to parse message", message);
+            }
+
+            if (!validJSON) {
+              // It can be possible that the chunk decoding is running away
+              // and the message chunk fails to append due to string length.
+              // In this case abort the chunk and reset so we can continue.
+              // ref: https://github.com/Mintplex-Labs/anything-llm/issues/416
+              try {
+                chunk += message;
+              } catch (e) {
+                console.error(`Chunk appending error`, e);
+                chunk = "";
+              }
+              continue;
+            } else {
+              // Full JSON object assembled - clear the stitch buffer.
+              chunk = "";
+            }
+          }
+
+          if (message == "[DONE]") {
+            // Terminal SSE sentinel: close out the client response.
+            // NOTE(review): the "data" listener is not detached after resolve,
+            // so late-arriving chunks could still attempt writes - confirm the
+            // axios stream is closed upstream.
+            writeResponseChunk(response, {
+              uuid,
+              sources,
+              type: "textResponseChunk",
+              textResponse: "",
+              close: true,
+              error: false,
+            });
+            resolve(fullText);
+          } else {
+            let error = null;
+            let finishReason = null;
+            let token = "";
+            try {
+              const json = JSON.parse(message);
+              error = json?.error || null;
+              token = json?.choices?.[0]?.delta?.content;
+              finishReason = json?.choices?.[0]?.finish_reason || null;
+            } catch {
+              continue;
+            }
+
+            if (!!error) {
+              // Endpoint-reported error: surface it to the client and stop.
+              writeResponseChunk(response, {
+                uuid,
+                sources: [],
+                type: "textResponseChunk",
+                textResponse: null,
+                close: true,
+                error,
+              });
+              resolve("");
+              return;
+            }
+
+            if (token) {
+              // Forward each delta token to the client as it arrives.
+              fullText += token;
+              writeResponseChunk(response, {
+                uuid,
+                sources: [],
+                type: "textResponseChunk",
+                textResponse: token,
+                close: false,
+                error: false,
+              });
+            }
+
+            if (finishReason !== null) {
+              // Model signalled completion inside the payload itself (may
+              // occur before the "[DONE]" sentinel).
+              writeResponseChunk(response, {
+                uuid,
+                sources,
+                type: "textResponseChunk",
+                textResponse: "",
+                close: true,
+                error: false,
+              });
+              resolve(fullText);
+            }
+          }
+        }
+      });
+    });
+  }
+
   // If stream is not a regular OpenAI Stream (like if using native model, Ollama, or most LangChain interfaces)
   // we can just iterate the stream content instead.
   if (!stream.hasOwnProperty("data")) {
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 53a76faeb..42ed262f9 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -64,6 +64,9 @@ function getLLMProvider(modelPreference = null) {
     case "native":
       const { NativeLLM } = require("../AiProviders/native");
       return new NativeLLM(embedder, modelPreference);
+    case "huggingface":
+      const { HuggingFaceLLM } = require("../AiProviders/huggingface");
+      return new HuggingFaceLLM(embedder, modelPreference);
     default:
       throw new Error("ENV: No LLM_PROVIDER value found in environment!");
   }
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 50b423474..acd77b2fd 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -95,6 +95,7 @@ const KEY_MAPPING = {
     checks: [nonZero],
   },
 
+  // Mistral AI API Settings
   MistralApiKey: {
     envKey: "MISTRAL_API_KEY",
     checks: [isNotEmpty],
@@ -109,12 +110,25 @@ const KEY_MAPPING = {
     envKey: "NATIVE_LLM_MODEL_PREF",
     checks: [isDownloadedModel],
   },
-
   NativeLLMTokenLimit: {
     envKey: "NATIVE_LLM_MODEL_TOKEN_LIMIT",
     checks: [nonZero],
   },
 
+  // Hugging Face LLM Inference Settings
+  HuggingFaceLLMEndpoint: {
+    envKey: "HUGGING_FACE_LLM_ENDPOINT",
+    checks: [isNotEmpty, isValidURL, validHuggingFaceEndpoint],
+  },
+  HuggingFaceLLMAccessToken: {
+    envKey: "HUGGING_FACE_LLM_API_KEY",
+    checks: [isNotEmpty],
+  },
+  HuggingFaceLLMTokenLimit: {
+    envKey: "HUGGING_FACE_LLM_TOKEN_LIMIT",
+    checks: [nonZero],
+  },
+
   EmbeddingEngine: {
     envKey: "EMBEDDING_ENGINE",
     checks: [supportedEmbeddingModel],
@@ -299,6 +313,7 @@ function supportedLLM(input = "") {
     "native",
     "togetherai",
     "mistral",
+    "huggingface",
   ].includes(input);
   return validSelection ? null : `${input} is not a valid LLM provider.`;
 }
@@ -396,6 +411,12 @@ function validDockerizedUrl(input = "") {
   return null;
 }
 
+function validHuggingFaceEndpoint(input = "") {
+  return input.slice(-6) !== ".cloud"
+    ? `Your HF Endpoint should end in ".cloud"`
+    : null;
+}
+
 // If the LLMProvider has changed we need to reset all workspace model preferences to
 // null since the provider<>model name combination will be invalid for whatever the new
 // provider is.
-- 
GitLab