From fc77b468006749af434a123625ee3a8b4a78fb45 Mon Sep 17 00:00:00 2001
From: Sean Hatfield <seanhatfield5@gmail.com>
Date: Thu, 2 May 2024 12:12:44 -0700
Subject: [PATCH] [FEAT] KoboldCPP LLM Support (#1268)

* add koboldcpp LLM support

* update docker and server .env.example files for koboldcpp support

* update LLM preference order and koboldcpp comments
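
Example .env configuration, mirroring the entries this patch adds to
docker/.env.example and server/.env.example (the model name is only an
illustration of a model id that KoboldCPP might expose):

    LLM_PROVIDER='koboldcpp'
    KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1'
    KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S'
    KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096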

---------

Co-authored-by: timothycarambat <rambat1010@gmail.com>
---
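The provider talks to KoboldCPP through its OpenAI-compatible API using the
openai client, the same way the new server/utils/AiProviders/koboldCPP/index.js
does. A minimal smoke-test sketch, assuming KoboldCPP is listening on its
default port 5000 with a model loaded:

    // List the model ids KoboldCPP exposes (the same request the new
    // getKoboldCPPModels() helper issues).
    const { OpenAI } = require("openai");
    const client = new OpenAI({
      baseURL: "http://127.0.0.1:5000/v1", // KOBOLD_CPP_BASE_PATH
      apiKey: null, // KoboldCPP does not require an API key
    });
    client.models.list().then((page) => console.log(page.data.map((m) => m.id)));
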
 docker/.env.example                           |   5 +
 .../LLMSelection/KoboldCPPOptions/index.jsx   | 112 +++++++++++
 frontend/src/media/llmprovider/koboldcpp.png  | Bin 0 -> 7110 bytes
 .../GeneralSettings/LLMPreference/index.jsx   |  14 ++
 .../Steps/DataHandling/index.jsx              |   8 +
 .../Steps/LLMPreference/index.jsx             |   9 +
 server/.env.example                           |   5 +
 server/models/systemSettings.js               |   5 +
 server/utils/AiProviders/koboldCPP/index.js   | 180 ++++++++++++++++++
 server/utils/helpers/customModels.js          |  25 +++
 server/utils/helpers/index.js                 |   3 +
 server/utils/helpers/updateENV.js             |  15 ++
 12 files changed, 381 insertions(+)
 create mode 100644 frontend/src/components/LLMSelection/KoboldCPPOptions/index.jsx
 create mode 100644 frontend/src/media/llmprovider/koboldcpp.png
 create mode 100644 server/utils/AiProviders/koboldCPP/index.js

diff --git a/docker/.env.example b/docker/.env.example
index 20120b5b5..e10ace026 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -66,6 +66,11 @@ GID='1000'
 # GROQ_API_KEY=gsk_abcxyz
 # GROQ_MODEL_PREF=llama3-8b-8192
 
+# LLM_PROVIDER='koboldcpp'
+# KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1'
+# KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S'
+# KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096
+
 # LLM_PROVIDER='generic-openai'
 # GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
 # GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
diff --git a/frontend/src/components/LLMSelection/KoboldCPPOptions/index.jsx b/frontend/src/components/LLMSelection/KoboldCPPOptions/index.jsx
new file mode 100644
index 000000000..7e5e20aef
--- /dev/null
+++ b/frontend/src/components/LLMSelection/KoboldCPPOptions/index.jsx
@@ -0,0 +1,112 @@
+import { useState, useEffect } from "react";
+import System from "@/models/system";
+
+export default function KoboldCPPOptions({ settings }) {
+  const [basePathValue, setBasePathValue] = useState(
+    settings?.KoboldCPPBasePath
+  );
+  const [basePath, setBasePath] = useState(settings?.KoboldCPPBasePath);
+
+  return (
+    <div className="flex gap-4 flex-wrap">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Base URL
+        </label>
+        <input
+          type="url"
+          name="KoboldCPPBasePath"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="http://127.0.0.1:5000/v1"
+          defaultValue={settings?.KoboldCPPBasePath}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+          onChange={(e) => setBasePathValue(e.target.value)}
+          onBlur={() => setBasePath(basePathValue)}
+        />
+      </div>
+      <KoboldCPPModelSelection settings={settings} basePath={basePath} />
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Token context window
+        </label>
+        <input
+          type="number"
+          name="KoboldCPPTokenLimit"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="4096"
+          min={1}
+          onScroll={(e) => e.target.blur()}
+          defaultValue={settings?.KoboldCPPTokenLimit}
+          required={true}
+          autoComplete="off"
+        />
+      </div>
+    </div>
+  );
+}
+
+function KoboldCPPModelSelection({ settings, basePath = null }) {
+  const [customModels, setCustomModels] = useState([]);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    async function findCustomModels() {
+      if (!basePath || !basePath.includes("/v1")) {
+        setCustomModels([]);
+        setLoading(false);
+        return;
+      }
+      setLoading(true);
+      const { models } = await System.customModels("koboldcpp", null, basePath);
+      setCustomModels(models || []);
+      setLoading(false);
+    }
+    findCustomModels();
+  }, [basePath]);
+
+  if (loading || customModels.length === 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Chat Model Selection
+        </label>
+        <select
+          name="KoboldCPPModelPref"
+          disabled={true}
+          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            {basePath?.includes("/v1")
+              ? "-- loading available models --"
+              : "-- waiting for URL --"}
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-4">
+        Chat Model Selection
+      </label>
+      <select
+        name="KoboldCPPModelPref"
+        required={true}
+        className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {customModels.map((model) => (
+          <option
+            key={model.id}
+            value={model.id}
+            selected={settings?.KoboldCPPModelPref === model.id}
+          >
+            {model.id}
+          </option>
+        ))}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/media/llmprovider/koboldcpp.png b/frontend/src/media/llmprovider/koboldcpp.png
new file mode 100644
index 0000000000000000000000000000000000000000..5724f04ab3a1bc40ac4e56c1fe9ee61fc67ba3ea
GIT binary patch
literal 7110
zcmeHMXIPV4wnkA9rKmKOW+fse1VJD)k*YM6DjrHm0t6BgNCJdV6hx6K7E}-rQF;-i
z7ZGVn_W;tn0up-f+#TnPqt3ne$K3m4ehd#G$^Q1YzP;97>wVv~xqDe(i*4_by$lQt
zY!|iFuY&hA`fCpp_!}@`t_0p#2-??(3=GUR^w$nKkEh)X47+A9MrI^4JzY2wkCQ;4
z@HS`(cN_uCW?)cshl7teGzoFk9fx%y!rhf1-)F$VXZkP{qP+L$Uq6a2C|medb<Mw~
zf^SL?dlHENheF-l+$7x2O5k1WpfGuPd8nimR7y%5%n&D1oJa_FaVO#lVCh#=>S!X;
z1w$ZV@J>hRrV%!HGD!&n`7!HnOF|)kJcdAa!G3=T3JFDH(Kxgdi3o*Bz<yaJ3JE7+
zNLci53s=RGejCAJ=$^o_2q!xwh`TrnZHpjdNf2cnybT_UQoDTlsQ6KsB9y)~Eas<s
zzmNQC4*K6b^!v1*eEb*PS0#~LFg8Cz;6#)_d><jx<0b*N6pBF6gCO}!5dL--Topz9
zY2kEF4AIWNjes!FFAxweM6?lwfL4NBMHBI47bN=Uz5mOgnhP31LZg(WB&DRqC1u1V
zr4*sRUHY+oiho_ua3@_vDI*awGP2S(XT_1yk~ZSf@;36~a%XL1#nCo$Fx0uTQb;Lz
z)DK&KyXBuOY1#loGEy>9=Vatz(lA+h89B)x_k6$jPq!H1$w+&;Tgv}f=|9H)G5vqo
z=3m40cUON6>;Kb66Mm{E`YwMjRQmpc3t)fG{<6Pd46aHh+2dW54bcdvquL0PEe4GR
z5&Y@OZ{Gdia}i+0FNyeLiS7u(Kcyg;4F8@kco!o)9;>W|aB@W)J$)7JghIQZT|^Y2
zzt8yJ*86u5`fUz?qo2|l`UB3CncOdTFfj0VUQ|~#a(_12OEoe!7G$1*K9)Ul@>W>w
zVG$Pd+S3l~j!ZS!{d#Ago&0pXrg>*{ee!_=R}+mcTw%uTA}0?s3Jowed!p_JpH!~q
z7kH?9`R*0c)tC!6Ed5t2cGumfHWv2Oom%%&*)CjCUZ0?r^i!uhsj(r(YHDhX0b_Q0
z?VkmXzW7}8oRj6<e_sC<F|cm=EU=Occloks@7}$U;<T1T4o^={v7Y#dtV-I-NEDv1
zu`+90S45`WzRAcO>B%m|=I8Y8HXGk-m#?+9bv$BbGtIkt4`vPzl>4o;#BQ}>ui4w%
z2l-iEzaHeXNwfOU)THI?3`<E(Md0xqyu7@mvF07U#co>tZ@mU5Ca{;5H6-Q7L_|cM
zNFsd8=L{3|2cO!-T)`!#rLjUwW_-)>LJR~Km-l_8)Y%?lQ*Kh`LW`-X>7CYAqn8N@
zL$TW%5C=y`ZW@gQUzq6R^ke3z7R@a2?cW%Y)k8Z>H;i<-I6L2risDmLRGgt%Z6CRL
z^X6E0er7q+)5|M7Ha2sWKQuIyKqmVa7b}3F!O_t>;U~0Sc&#r*cH6T&@bg}tvS@8>
zJtUTynQ4ks%WOwZbY!hG`E13T;Ts}lSXY}KT4vUC<vPNAwmi9bcpQ{7-o6zqEG+!c
z&`>p074#_|Z&g`cZS&#Yu@tZzBU97(f`UWfdPgA<Vq;??Y5k6&K1PXe$BrEwtz~3d
z#Ff#c<YZM0Mg(jK;md~uCxwOIH#D%}->wSVHpPT!2~sMPwj358914ztlg}t`ZxE~C
z^CJxvPhd!~))Uz;K?vU0)G!YY4r=<B$U_WbDx2ffvYdM1^Cb3nkFC7#misXyYafaf
z5Jy8hyKqM4=7AbPJaPE`w{ji}NuAxy-aA#TvQaD%P^%2H(J-Hl6Ur+CjQbV?#9qb4
z?U)~JssZ+#w>Btma%p2@0lb#(<=9CX^E~WfVf|-G-%K5JxVX5iS7rv~oQH08c008^
zpYAQ@4G#|wBr~xcK@lYH3R=2N^{8rV?|Bg)AJ<N6=yEkPJLEkZWc4JsL6g>=sRhE4
zn#0EM_3PI?EG)3K2sOQ^-JA7K&+cJkGu{ySGC3KNem(zvUtgcE(g~d>iWI3lT(|9#
zs{lX0q}LkhO+K}(qr=R|$SB8r-QVBeEE{$AS~vE=bE`6Mfq2c33lvIWIecnrO54Do
z1{tPgTOJdBsy<qQH`OFvYE2I0s5_Nf(bCc~@ca?1f5fgS#@{ki%fdn+!?HB6NoAY=
zfv}!$e0=<nbHCTJfb!;oVcU|G&n9_n(F6P%nV1ZLrD|WgbRkyR%P6e(HNr2U{rRIR
zKX2M+@nAtEbLS%sjg9Oe<!6?b@~oRa91Imw3FT1PxNK+vWTS30oSV8dp=xZ*edf#=
z!c>p^m(kJq?CgVy2C>y3pFn~#zgxi*9UUDzIy&C<^$A)Ok;*~!g+F|FYq%zC_#-6N
zNYMm0bBG}=EzKH<WaQ)HlT%QL2Yx(}#vIgtCU;Y1d$lRBCPu}lFKw8Ihr`3&U5;G<
z{?ZI0B9d&(WSE4Lk(RdZ%E6@gU2}DHeIid3o|~V~LVY~Q%6D$!CdF1%RCHmyZQpd+
zwsNSTQXt5<p-i7mQSa3{tH>S$At52pjoF$&a`01yZCJGa9eR3cg$V{O)*mJXWdsKW
z1y!sq(zt2r8XAXlKim`){P^*soaf4Gn`p2K>$(W>LsF=KD>#lL(tG@v*rZ-jRc8Lw
zfr>=*7dThETuM(*KXB+!KuL+BANkUyOS<ryO|hkrx5ygy`e*WnS^5OR>v4#PQVEZ^
z`Af5mmT#{yEyD3?eMjJ=F$O-qihe%_(WM*$k!UEEnVNcbl|Mx6;KBol!ErG$-C0Of
zY^>1d^y|#gjh+?`_V(u~6#3fP+UDkF>pWMs<dl>P7))|!6M3{zpr^o@(b(A77H67z
z`!>_VM~?<a8luFVPCy{cc8$-jq|HwiPY+e`T4879>%Reb2t0d1K*3ddmESbe(pXG(
z^gJn*d2_ribq_nciRS}%cX#WWPyze)jOdG_XQ0rUKyJ~QF2~IBiVDW1r6oE(0Sud%
zoSa=~F&GY!?Ry7q)$?dMmjc#w;lj?HjEn$HS?%oX6qh=X05C6PT9)n?5J+EXslVsz
zyR)*Y3eoWzG1i*Q35<NoLTb@d!lKx9tSyzdk1{HNLZJqxdW(h2LPtQpAwQ>G8v|7E
z?Afz{kr5_P5;>0T>;R(73J7sm40YF+CJn{hTwOIuq@3j5b5VLx=R_h@W-B>`bFGYx
z{ej)warvqC@#Qg2VcmE6{oanRd#b8-V=x$ZkQvuKL+9pP>Z4>i96zV2qS5F#<OL5U
z35%jzI*%m~!_^@Hp`n`U>bq>~pL}}3@g#&#)&?Zw3J9p+>D1KJ_3|}ZK)@d0QgRFg
z3gsY<)T^4CN0#FsL`Fs?<Z*Ix4g!Su@Ikv|btLL_cDB?SS-O4s=~=tuFj!V+Y^aQk
z%!m4Vbz58Eq2b{Jyu69Yy(hFnPe+ukji=hKMo8bmb>|@(pUDT3y_bvoa|<@_-#-{E
zq#|gV@dwOo^plp542>v|E=YQHue+ne8Z37V@L5oIQ*G`0%DbG--~K!mA1W#$QeHxt
zzZV{UlT7<8q8zt?1ipS!98YPd%`TU13v6#Kmxaf~u)Db}+tN;*I(6*KnZ0doZISCc
zdZ;{}?{Bft>A@u39DUWSwyy2~KYuW&GKUnat5<#V`#knpT3V)%+q^qj1>i<4@(m)g
zvb@~f+y&HS2|8#jO<bGk%GFs^V_~?>A(UaBn3KZ=vWt;09kW&YNc6+UkL4iAx!dnC
zEN9$_j*iabeW4nq9JckiY(s-gGW^&WtpM{_u<v>6Ra}L8rbOidW%xcuiAv`xy_y`9
z!zqyEs;a6VYHK~LUs)L$ae+vNIL&((yH39M-^Y9J>C?Nw2A|hPw1Qjk-qp?4gqvW8
zGpx!o%o4>v>)yI`D`%_#hOD;t2=xI%ASGX{AuB5jwvN+{${-u)>sJFK<|M4E>p4s6
zq|Ad0eMu=Pdwo_agp8Pjbjb^2pP088hlhvt*eUUuWm`v}r3(k&Zf{ZJ!!~CsIIeTK
zi03TSw6*0LH{zT?(Wzsx*wh$0=dHA5dZRy|>y{n^RajLAEyFH6`!d;`52MVX^!9B7
z0vQ+>cy68v;x%0|uOFmYKRGdB^jPm4ptox+DenOLIa;1t`fwu5J~CX6GWYTmzEadQ
zV*u~79+gmJo;kXigH5??Vq$9)g^L3cq9Y5*9OE&bY@(n=AV~5^{9(W3+Pwj5BaSu>
zJ~Fk0jbox8iJGSL&1<Nu4@`8vNlH)G(9>hnkB0j<H(#zoX1{rpMW&fFY`QPDnK}<u
zvb46f#U&&#^?S~<CFn<|9MUee>|c2IKJD5Yu2Ce|$pb<{VM9YUJtY**+}zxbGM{Y@
z*DDx(fDl{j{o4-S0-(Z-qj{Y^E@DwgWIWu?b9P9hVH3AciMmu7r*>yxdb<D2BoI^T
zT3Se;DqiR1o%30@#m{QdA7?%~GgtvgxJ}XcB~LZCsEPIJ+*dsv*7UTr=3+PeNeCp>
zyBovip{vW9=hPeMy}2e6;!34Z5FhWKN{JE#4l+M_^r)khnqLl=kdUBPA;6uM;9Jqr
z0sz6nA3gF10;O{xi%ceK>*%yBz>loQS3ab5YU2v+bf(baao6*m00bQi6;Md{hTuYe
zBf*1@$m|6tEGsL+VF<v9OGE~qL%~4-0XG0ojHe_mERagpr;1aW*7^#Z)trq;l>rC#
zaB#?)3377!l~R|FLm-Krb4|+Yx{Gi7+uM1;&d8W;RrBGkL0)!bqjlbV)zHuY(CIgn
zKgKNNd4ulMbz3{TJI1LdgFp!}C2}rLb|<b4>|)^J<sI%RBr?_MmBxeshPwmwt>4$~
ze7qJGn>H?6CcnD6x;YS0S6jQcv9Xbk>4}LDCe}kYwl>%E<van8Q>^cO0@@%WBZH3A
zG1Mt}R(`qWwE3^M%}}jg<-WTMJXfWz8X9ubA!Crv^!&~nOWk;G(~b}U1(RGAWOXoX
zr8+G+xjfggohyukh2=`Am7s!aTKRLv@fJN$EJZdH00l7Ahy8wK=N~4>OeZdJ_P5eH
z?SYSh!wj!py$T2kX;U<@W_N7Qz_oqa>IYDzt*`HtyA*;5r;-4l1Je%XsQsLr@hf5(
zUD6FL1}f<u?78I+w>PJKgatBRh5mWxBA^Hv@uPDyGm>Trc_rCb0CCzB%4a|b({Y@+
zc@Yv8DMjYN=G8WS&Om>6y!BFu@mjjpRs>}0%1{uLH7{Phu7b&d(MMljpKem<(r5t?
z#|g80UlHvsCKJ#3I)&0$de&dd<}6d=*Va7f2+hpSe;F4RjQn&32q`y^TF*g05x6)$
zKED6pL0|A$>2gBeTwSvjYhi3xZ6aWt(-DrH$%g4dmzL2UrIsyBH!o$X(eB&iGDGT}
zmH6~x6c5M5!~~K71j9U62$eN8e2cft04(u9Ez=6$O7p~8Qb(TJz4lsnZNoPJQroqA
zw-O=q+qZ9bwS>G{2b@Js(j?{1@9yE-bG_I#z1+pXz@SZbJ3%jsi+9a)dviI^FEB4J
zZx1W0k>Z`M<B=d+Jwc^2)#Zln2X!N|W%a~EY1$5t&%{7;Kq8$0ECHbZiGrJii;GKS
z@A>T4ulLhQ^!#}SCU*XS&Np_V8A6ZK^hQ2yZf?#3@vcTbPn-K%2fDkpa+|$WDz!@M
z@V<TfU=FQGwnFC$34=R*eSNuNi>^lIIT%@3ggk%#+*T`9M@NTF09D~w6&00TyLO#+
zGJO~m6S~~*Q`UL-2EAIyM0W8^#sW9dmpKdVt+Z8S#cj3<>U3#h^3>P5$1v&=c4d9N
zqO<dGZxLAy2w^%tkiPx7+dyfN3-k#A_4T?8%TCA4ky#&`RHM?#AX8PZTsiRK#S1Rl
z*O8Iqa&m{L%Tu_bSXK@WQ~pnl_4T(NKjzg8<-cne>&3&y#s=Ce4NcA4Lrtp8%*-W=
zpRY%*Z7;T4#k)=S0cU+PGofke>GU>bqNmW1e8UmwwdQXmQ2I3hELlJ6XltL>)MU~O
zK6Ep~qWA^C)XvUMJv>&;V_^)@s&*XEdIcy;wut-%k0aUE<vaf<asQNbGry<^&c)3g
zmz1QP)dwUP4D=g8y4RF+yB6XizO^LACnfE<c=2N3Vs>`6r1yq9wy)$cJer-6ncXP*
zZAx02QE%ROO&@hyNzcGWrpRmk9KCaC&8q?mqNc!kn4yQ7WVgAt7?{5UG%hP&9vDdH
zbM$n7ta<{81u{<(w3t=7TjGT=w>!GJj-NWU8wi)yoR2SHN6Ok7N-gFpG8!Jg18L25
zLNoYs`s2vRKuk-5%qF!n8$GkQD05wfE(a*R6rIklE}O;iHoapc5~(sm{5k+K<~Pwu
zpn5b_<}f&1NL#8YU+L{$E_!j6Qe}@HKVILrlwzEE`Hw%aORhq;I6)HVE9mZRt}mOW
zN8P*Eob&vBTN`(*_geC`_gkQbkITpy&*xj~2r{;PBWY`EyAK%bS*Epf02OH4NjfGh
ztT}6P{^Iu5X84mQNd*Rx#~j{p(fzUkq|#ABd2^93Y{N`Xuff3eBJMh7{yF0^Pr%Dp
zuQ2BB2M!;;`|u$b!*W6rC<94!?L(ePnNiS;78J-$^p$oeS=cJCjh!gjT<VUbDagws
zKyosThR6DBQRwB>u$8MX9^*d5LZADk22%0<{k{A5ZCd;JIM~>#e0T5Dg--+Kb{?re
ztgfkfrZLHWaC$ln_=y~<yephLPn>_S2Tqu7hzt*pEBagr+I<890kqIg0yHhwbB<+h
zZcY;O=Se-jaiy)5VbGT|fmU7;S<M=+auP@paB_0#Wn!YJ(|I5^ZUCPD^7U)71ZJ`;
zSM=@5&^~@S0t3)ik%H94+1c`Jblp90N`a~26xR$n;t#r>TmJqc`o$C~^#s^|&^g(5
zvQ&Y%&(6*kxX&I3z#2Fj>ti{;9dzdobD#G*j$g@x%gTH8)p+YGbrZIT4#76xaT?8e
zZ2|L4k#bPhsmDGtl1L<~CF(~vm-+YraaAF<o+<<PT9alMQVs?zFbKpV!Ocx;d8+ro
z{{0NRypMTuuYvO&WKHNXpud5DPfAL%@0!R4b7z*9O*b8nC!}O#RD%N}u=X&}(N_Rc
z15&UW=%nlMd3_GaK$SmJ_BukLP)-A-+z=&;K%@O#C%fVR|IW<J+z1QfC<0%{y(k@<
z6TbTD9BUspNRStwEB<qA_-}D+_^%w_&iLeoxc6G`%BZCOr|qJKzIw(5Yv2C@FtFJW

literal 0
HcmV?d00001

diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index ce37bd480..60827e0de 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -18,6 +18,7 @@ import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
 import PerplexityLogo from "@/media/llmprovider/perplexity.png";
 import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
 import GroqLogo from "@/media/llmprovider/groq.png";
+import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -40,6 +41,7 @@ import CohereAiOptions from "@/components/LLMSelection/CohereAiOptions";
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
 import CTAButton from "@/components/lib/CTAButton";
+import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
 
 export const AVAILABLE_LLM_PROVIDERS = [
   {
@@ -154,6 +156,18 @@ export const AVAILABLE_LLM_PROVIDERS = [
       "The fastest LLM inferencing available for real-time AI applications.",
     requiredConfig: ["GroqApiKey"],
   },
+  {
+    name: "KoboldCPP",
+    value: "koboldcpp",
+    logo: KoboldCPPLogo,
+    options: (settings) => <KoboldCPPOptions settings={settings} />,
+    description: "Run local LLMs using koboldcpp.",
+    requiredConfig: [
+      "KoboldCPPModelPref",
+      "KoboldCPPBasePath",
+      "KoboldCPPTokenLimit",
+    ],
+  },
   {
     name: "Cohere",
     value: "cohere",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index d0613b8c3..6e8a18974 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -15,6 +15,7 @@ import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
 import PerplexityLogo from "@/media/llmprovider/perplexity.png";
 import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
 import GroqLogo from "@/media/llmprovider/groq.png";
+import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import ZillizLogo from "@/media/vectordbs/zilliz.png";
 import AstraDBLogo from "@/media/vectordbs/astraDB.png";
@@ -138,6 +139,13 @@ export const LLM_SELECTION_PRIVACY = {
     ],
     logo: GroqLogo,
   },
+  koboldcpp: {
+    name: "KoboldCPP",
+    description: [
+      "Your model and chats are only accessible on the server running KoboldCPP",
+    ],
+    logo: KoboldCPPLogo,
+  },
   "generic-openai": {
     name: "Generic OpenAI compatible service",
     description: [
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 0e73c399f..4cf3c221e 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -15,6 +15,7 @@ import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
 import PerplexityLogo from "@/media/llmprovider/perplexity.png";
 import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
 import GroqLogo from "@/media/llmprovider/groq.png";
+import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@@ -38,6 +39,7 @@ import System from "@/models/system";
 import paths from "@/utils/paths";
 import showToast from "@/utils/toast";
 import { useNavigate } from "react-router-dom";
+import KoboldCPPOptions from "@/components/LLMSelection/KoboldCPPOptions";
 
 const TITLE = "LLM Preference";
 const DESCRIPTION =
@@ -102,6 +104,13 @@ const LLMS = [
     options: (settings) => <LocalAiOptions settings={settings} />,
     description: "Run LLMs locally on your own machine.",
   },
+  {
+    name: "KoboldCPP",
+    value: "koboldcpp",
+    logo: KoboldCPPLogo,
+    options: (settings) => <KoboldCPPOptions settings={settings} />,
+    description: "Run local LLMs using koboldcpp.",
+  },
   {
     name: "Together AI",
     value: "togetherai",
diff --git a/server/.env.example b/server/.env.example
index e515cc888..c8f05340a 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -63,6 +63,11 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
 # GROQ_API_KEY=gsk_abcxyz
 # GROQ_MODEL_PREF=llama3-8b-8192
 
+# LLM_PROVIDER='koboldcpp'
+# KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1'
+# KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S'
+# KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096
+
 # LLM_PROVIDER='generic-openai'
 # GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
 # GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index dfbdb882f..f7782d26a 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -359,6 +359,11 @@ const SystemSettings = {
       HuggingFaceLLMAccessToken: !!process.env.HUGGING_FACE_LLM_API_KEY,
       HuggingFaceLLMTokenLimit: process.env.HUGGING_FACE_LLM_TOKEN_LIMIT,
 
+      // KoboldCPP Keys
+      KoboldCPPModelPref: process.env.KOBOLD_CPP_MODEL_PREF,
+      KoboldCPPBasePath: process.env.KOBOLD_CPP_BASE_PATH,
+      KoboldCPPTokenLimit: process.env.KOBOLD_CPP_MODEL_TOKEN_LIMIT,
+
       // Generic OpenAI Keys
       GenericOpenAiBasePath: process.env.GENERIC_OPEN_AI_BASE_PATH,
       GenericOpenAiModelPref: process.env.GENERIC_OPEN_AI_MODEL_PREF,
diff --git a/server/utils/AiProviders/koboldCPP/index.js b/server/utils/AiProviders/koboldCPP/index.js
new file mode 100644
index 000000000..4b1ff3f61
--- /dev/null
+++ b/server/utils/AiProviders/koboldCPP/index.js
@@ -0,0 +1,180 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+  clientAbortedHandler,
+  writeResponseChunk,
+} = require("../../helpers/chat/responses");
+const { v4: uuidv4 } = require("uuid");
+
+class KoboldCPPLLM {
+  constructor(embedder = null, modelPreference = null) {
+    const { OpenAI: OpenAIApi } = require("openai");
+    if (!process.env.KOBOLD_CPP_BASE_PATH)
+      throw new Error(
+        "KoboldCPP must have a valid base path to use for the api."
+      );
+
+    this.basePath = process.env.KOBOLD_CPP_BASE_PATH;
+    this.openai = new OpenAIApi({
+      baseURL: this.basePath,
+      apiKey: null,
+    });
+    this.model = modelPreference ?? process.env.KOBOLD_CPP_MODEL_PREF ?? null;
+    if (!this.model) throw new Error("KoboldCPP must have a valid model set.");
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    if (!embedder)
+      console.warn(
+        "No embedding provider defined for KoboldCPPLLM - falling back to NativeEmbedder for embedding!"
+      );
+    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.defaultTemp = 0.7;
+    this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
+  }
+
+  log(text, ...args) {
+    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
+  streamingEnabled() {
+    return "streamGetChatCompletion" in this;
+  }
+
+  // Ensure the user set a value for the token limit
+  // and if undefined - assume 4096 window.
+  promptWindowLimit() {
+    const limit = process.env.KOBOLD_CPP_MODEL_TOKEN_LIMIT || 4096;
+    if (!limit || isNaN(Number(limit)))
+      throw new Error("No token context limit was set.");
+    return Number(limit);
+  }
+
+  // Short circuit since we have no idea if the model is valid or not
+  // in pre-flight for generic endpoints
+  isValidChatCompletionModel(_modelName = "") {
+    return true;
+  }
+
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+  }
+
+  async isSafe(_input = "") {
+    // Not implemented so must be stubbed
+    return { safe: true, reasons: [] };
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
+    const result = await this.openai.chat.completions
+      .create({
+        model: this.model,
+        messages,
+        temperature,
+      })
+      .catch((e) => {
+        throw new Error(e.response?.data?.error?.message ?? e.message);
+      });
+
+    if (!result.hasOwnProperty("choices") || result.choices.length === 0)
+      return null;
+    return result.choices[0].message.content;
+  }
+
+  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+    const streamRequest = await this.openai.chat.completions.create({
+      model: this.model,
+      stream: true,
+      messages,
+      temperature,
+    });
+    return streamRequest;
+  }
+
+  handleStream(response, stream, responseProps) {
+    const { uuid = uuidv4(), sources = [] } = responseProps;
+
+    // Custom handler for KoboldCPP stream responses
+    return new Promise(async (resolve) => {
+      let fullText = "";
+      const handleAbort = () => clientAbortedHandler(resolve, fullText);
+      response.on("close", handleAbort);
+
+      for await (const chunk of stream) {
+        const message = chunk?.choices?.[0];
+        const token = message?.delta?.content;
+
+        if (token) {
+          fullText += token;
+          writeResponseChunk(response, {
+            uuid,
+            sources: [],
+            type: "textResponseChunk",
+            textResponse: token,
+            close: false,
+            error: false,
+          });
+        }
+
+        // KoboldCPP ends the stream with finish_reason "length" or "stop";
+        // it is null (not the string "null") on intermediate chunks.
+        if (
+          message?.finish_reason === "length" ||
+          message?.finish_reason === "stop"
+        ) {
+          writeResponseChunk(response, {
+            uuid,
+            sources,
+            type: "textResponseChunk",
+            textResponse: "",
+            close: true,
+            error: false,
+          });
+          response.removeListener("close", handleAbort);
+          resolve(fullText);
+        }
+      }
+    });
+  }
+
+  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+module.exports = {
+  KoboldCPPLLM,
+};
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index 1bb54170a..ce690ae47 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -14,6 +14,7 @@ const SUPPORT_CUSTOM_MODELS = [
   "perplexity",
   "openrouter",
   "lmstudio",
+  "koboldcpp",
 ];
 
 async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -39,6 +40,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
       return await getOpenRouterModels();
     case "lmstudio":
       return await getLMStudioModels(basePath);
+    case "koboldcpp":
+      return await getKoboldCPPModels(basePath);
     default:
       return { models: [], error: "Invalid provider for custom models" };
   }
@@ -171,6 +174,28 @@ async function getLMStudioModels(basePath = null) {
   }
 }
 
+async function getKoboldCPPModels(basePath = null) {
+  try {
+    const { OpenAI: OpenAIApi } = require("openai");
+    const openai = new OpenAIApi({
+      baseURL: basePath || process.env.KOBOLD_CPP_BASE_PATH,
+      apiKey: null,
+    });
+    const models = await openai.models
+      .list()
+      .then((results) => results.data)
+      .catch((e) => {
+        console.error(`KoboldCPP:listModels`, e.message);
+        return [];
+      });
+
+    return { models, error: null };
+  } catch (e) {
+    console.error(`KoboldCPP:getKoboldCPPModels`, e.message);
+    return { models: [], error: "Could not fetch KoboldCPP Models" };
+  }
+}
+
 async function ollamaAIModels(basePath = null) {
   let url;
   try {
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 5d88040dc..ba65e3dfb 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -77,6 +77,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
     case "groq":
       const { GroqLLM } = require("../AiProviders/groq");
       return new GroqLLM(embedder, model);
+    case "koboldcpp":
+      const { KoboldCPPLLM } = require("../AiProviders/koboldCPP");
+      return new KoboldCPPLLM(embedder, model);
     case "cohere":
       const { CohereLLM } = require("../AiProviders/cohere");
       return new CohereLLM(embedder, model);
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 45f2fd546..19cdfe2b2 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -132,6 +132,20 @@ const KEY_MAPPING = {
     checks: [nonZero],
   },
 
+  // KoboldCPP Settings
+  KoboldCPPBasePath: {
+    envKey: "KOBOLD_CPP_BASE_PATH",
+    checks: [isNotEmpty, isValidURL],
+  },
+  KoboldCPPModelPref: {
+    envKey: "KOBOLD_CPP_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
+  KoboldCPPTokenLimit: {
+    envKey: "KOBOLD_CPP_MODEL_TOKEN_LIMIT",
+    checks: [nonZero],
+  },
+
   // Generic OpenAI InferenceSettings
   GenericOpenAiBasePath: {
     envKey: "GENERIC_OPEN_AI_BASE_PATH",
@@ -403,6 +417,7 @@ function supportedLLM(input = "") {
     "perplexity",
     "openrouter",
     "groq",
+    "koboldcpp",
     "cohere",
     "generic-openai",
   ].includes(input);
-- 
GitLab