From c2c8fe97562202ef028d9f57a220e84655502f19 Mon Sep 17 00:00:00 2001
From: Sean Hatfield <seanhatfield5@gmail.com>
Date: Wed, 17 Jan 2024 14:42:05 -0800
Subject: [PATCH] add support for mistral api (#610)

* add support for the Mistral API (see the wiring sketch below)
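
A minimal sketch of how the provider is wired, using only what the diff below
introduces (local variable names here are illustrative): the existing "openai"
npm client is pointed at Mistral's OpenAI-compatible endpoint, and
"mistral-tiny" is the fallback model when MISTRAL_MODEL_PREF is unset.

    const { Configuration, OpenAIApi } = require("openai");

    // Reuse the OpenAI SDK against Mistral's compatible REST endpoint.
    const config = new Configuration({
      basePath: "https://api.mistral.ai/v1",
      apiKey: process.env.MISTRAL_API_KEY,
    });
    const client = new OpenAIApi(config);
    const model = process.env.MISTRAL_MODEL_PREF || "mistral-tiny";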

* update docs to show support for Mistral

* add a default temperature to every provider and recommend a per-provider default (see the temperature sketch below)
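
A sketch of the new default-temperature flow; resolveTemperature is a
hypothetical helper that mirrors the fallback expression this patch adds to
server/utils/chats/index.js and stream.js. Each provider constructor now sets
this.defaultTemp (0.0 for Mistral, 0.7 everywhere else), and an explicit
workspace openAiTemp always wins over it.

    // Hypothetical helper mirroring the inline expression used by the chat
    // handlers: temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp
    function resolveTemperature(workspace, LLMConnector) {
      return Number(workspace?.openAiTemp ?? LLMConnector.defaultTemp);
    }

    resolveTemperature({ openAiTemp: 0.4 }, { defaultTemp: 0.0 }); // 0.4 -> workspace override
    resolveTemperature({}, { defaultTemp: 0.0 });                  // 0   -> Mistral's default
    resolveTemperature({}, { defaultTemp: 0.7 });                  // 0.7 -> every other provider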

---------

Co-authored-by: timothycarambat <rambat1010@gmail.com>
---
 README.md                                     |   1 +
 docker/.env.example                           |   4 +
 .../LLMSelection/MistralOptions/index.jsx     | 103 ++++++++++
 .../Modals/MangeWorkspace/Settings/index.jsx  |  18 +-
 frontend/src/media/llmprovider/mistral.jpeg   | Bin 0 -> 4542 bytes
 .../GeneralSettings/LLMPreference/index.jsx   |  11 +-
 .../Steps/DataHandling/index.jsx              |   8 +
 .../Steps/LLMPreference/index.jsx             |   9 +
 server/.env.example                           |   4 +
 server/models/systemSettings.js               |  12 ++
 server/utils/AiProviders/anthropic/index.js   |   1 +
 server/utils/AiProviders/azureOpenAi/index.js |   5 +-
 server/utils/AiProviders/gemini/index.js      |   1 +
 server/utils/AiProviders/lmStudio/index.js    |   5 +-
 server/utils/AiProviders/localAi/index.js     |   5 +-
 server/utils/AiProviders/mistral/index.js     | 184 ++++++++++++++++++
 server/utils/AiProviders/native/index.js      |   5 +-
 server/utils/AiProviders/ollama/index.js      |   5 +-
 server/utils/AiProviders/openAi/index.js      |   5 +-
 server/utils/AiProviders/togetherAi/index.js  |   5 +-
 server/utils/chats/index.js                   |   2 +-
 server/utils/chats/stream.js                  |   4 +-
 server/utils/helpers/customModels.js          |  23 +++
 server/utils/helpers/index.js                 |   4 +
 server/utils/helpers/updateENV.js             |  10 +
 25 files changed, 412 insertions(+), 22 deletions(-)
 create mode 100644 frontend/src/components/LLMSelection/MistralOptions/index.jsx
 create mode 100644 frontend/src/media/llmprovider/mistral.jpeg
 create mode 100644 server/utils/AiProviders/mistral/index.js

diff --git a/README.md b/README.md
index 4249c42bc..6e3df0df4 100644
--- a/README.md
+++ b/README.md
@@ -71,6 +71,7 @@ Some cool features of AnythingLLM
 - [LM Studio (all models)](https://lmstudio.ai)
 - [LocalAi (all models)](https://localai.io/)
 - [Together AI (chat models)](https://www.together.ai/)
+- [Mistral](https://mistral.ai/)
 
 **Supported Embedding models:**
 
diff --git a/docker/.env.example b/docker/.env.example
index 5bd909af6..8d33a809d 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -44,6 +44,10 @@ GID='1000'
 # TOGETHER_AI_API_KEY='my-together-ai-key'
 # TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'
 
+# LLM_PROVIDER='mistral'
+# MISTRAL_API_KEY='example-mistral-ai-api-key'
+# MISTRAL_MODEL_PREF='mistral-tiny'
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
diff --git a/frontend/src/components/LLMSelection/MistralOptions/index.jsx b/frontend/src/components/LLMSelection/MistralOptions/index.jsx
new file mode 100644
index 000000000..d5c666415
--- /dev/null
+++ b/frontend/src/components/LLMSelection/MistralOptions/index.jsx
@@ -0,0 +1,103 @@
+import { useState, useEffect } from "react";
+import System from "@/models/system";
+
+export default function MistralOptions({ settings }) {
+  const [inputValue, setInputValue] = useState(settings?.MistralApiKey);
+  const [mistralKey, setMistralKey] = useState(settings?.MistralApiKey);
+
+  return (
+    <div className="flex gap-x-4">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Mistral API Key
+        </label>
+        <input
+          type="password"
+          name="MistralApiKey"
+          className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="Mistral API Key"
+          defaultValue={settings?.MistralApiKey ? "*".repeat(20) : ""}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+          onChange={(e) => setInputValue(e.target.value)}
+          onBlur={() => setMistralKey(inputValue)}
+        />
+      </div>
+      <MistralModelSelection settings={settings} apiKey={mistralKey} />
+    </div>
+  );
+}
+
+function MistralModelSelection({ apiKey, settings }) {
+  const [customModels, setCustomModels] = useState([]);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    async function findCustomModels() {
+      if (!apiKey) {
+        setCustomModels([]);
+        setLoading(false);
+        return;
+      }
+      setLoading(true);
+      const { models } = await System.customModels(
+        "mistral",
+        typeof apiKey === "boolean" ? null : apiKey
+      );
+      setCustomModels(models || []);
+      setLoading(false);
+    }
+    findCustomModels();
+  }, [apiKey]);
+
+  if (loading || customModels.length == 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Chat Model Selection
+        </label>
+        <select
+          name="MistralModelPref"
+          disabled={true}
+          className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            {!!apiKey
+              ? "-- loading available models --"
+              : "-- waiting for API key --"}
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-4">
+        Chat Model Selection
+      </label>
+      <select
+        name="MistralModelPref"
+        required={true}
+        className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {customModels.length > 0 && (
+          <optgroup label="Available Mistral Models">
+            {customModels.map((model) => {
+              return (
+                <option
+                  key={model.id}
+                  value={model.id}
+                  selected={settings?.MistralModelPref === model.id}
+                >
+                  {model.id}
+                </option>
+              );
+            })}
+          </optgroup>
+        )}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx b/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx
index a3089d688..da0e7b9f0 100644
--- a/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx
+++ b/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx
@@ -27,11 +27,21 @@ function castToType(key, value) {
   return definitions[key].cast(value);
 }
 
+function recommendedSettings(provider = null) {
+  switch (provider) {
+    case "mistral":
+      return { temp: 0 };
+    default:
+      return { temp: 0.7 };
+  }
+}
+
 export default function WorkspaceSettings({ active, workspace, settings }) {
   const { slug } = useParams();
   const formEl = useRef(null);
   const [saving, setSaving] = useState(false);
   const [hasChanges, setHasChanges] = useState(false);
+  const defaults = recommendedSettings(settings?.LLMProvider);
 
   const handleUpdate = async (e) => {
     setSaving(true);
@@ -143,20 +153,20 @@ export default function WorkspaceSettings({ active, workspace, settings }) {
                       This setting controls how "random" or dynamic your chat
                       responses will be.
                       <br />
-                      The higher the number (2.0 maximum) the more random and
+                      The higher the number (1.0 maximum) the more random and
                       incoherent.
                       <br />
-                      <i>Recommended: 0.7</i>
+                      <i>Recommended: {defaults.temp}</i>
                     </p>
                   </div>
                   <input
                     name="openAiTemp"
                     type="number"
                     min={0.0}
-                    max={2.0}
+                    max={1.0}
                     step={0.1}
                     onWheel={(e) => e.target.blur()}
-                    defaultValue={workspace?.openAiTemp ?? 0.7}
+                    defaultValue={workspace?.openAiTemp ?? defaults.temp}
                     className="bg-zinc-900 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5"
                     placeholder="0.7"
                     required={true}
diff --git a/frontend/src/media/llmprovider/mistral.jpeg b/frontend/src/media/llmprovider/mistral.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..1019f495d4d690dd639aa9f4e5751c403b2eff27
GIT binary patch
literal 4542
zcmex=<NpH&0WUXCHwH#V1_nk3Mh1rew;7xnn3<SBh?$w0g_)U!m4lU)g@u)iot=$?
zhl_`Yn~R&9mrqE5mrsz7n_ECkKu}mjR8*9QUtB^=L_$bJR0L!QBQpyND+?<pD=Vi6
zFE_6U$>9GX20;#n6AUMq8I>5A1R0qH8UG()kYQkCU}9zjxt9S7*g05O*_b(*K(dUC
z3?NA)X;wCN7G@?6#{ah%c$gU(m;{&w7#JA0DnK=(P}<^4I42)`U4G}Y<mRuo>Wuea
zY*<*(%fJz){blWZ{Uv&pZP8Y8i_9jzyBK%qKZ5`RL(S`x$G-Hu*pZveyt~BqkN4gS
ziodV3zR&G_+hKH6+;UaVP3h~**G`wqxs~2%+t_u>tjNY&C&M6;*>61qgS^#|^3Wv`
zWj)Ns{KofBr7WH9H!Z(%iF)D6v&YV_l~lId!Z7n;=e(a$C#6gS7#MzDfBv%j;O%s&
zY1XUX&YHOG+r@qRC)FtLk5E=xxj3%><;|@c_x5fMGP``VaN~5ZzJz_392ofLR_y#?
z6MIdx=j1n;>C5vkA3kJq=ly!~vSS6OpBet%*Qcl2efak$51(|FW4=9W!-DSmoE5K<
zU|{^bK6c~vA`d@jvpZJOs?j-9j~g$(D6>=W+*O@`Gv)Q!mJx5B&T9`{?K^F)*_m(W
zdN$WZMQzQ@x#IIM?_2`||J<s%+nVPEWf#8+e~~G4DMY$9<)n5yXVIiD7N(YpYc}*8
zpZ2zA#nxq~!}@NV7J7cg{<19VjwRp2iY;z-^F@Hd^@3m3dP&_R_pt42Mfa^Q6H3?N
z)G}ZQiMm*#82|kGTjf>z7cK5y9h0+US^X^4&ju*bh|b~ve}q9tfPsOTjg5(kiJ1{1
z&&0yUz{t$Xt{@;HW@2h4YAj?VXdtYZQ1Jg20|z4`10yTbe};f1tJPH|AGsQ4X*28R
z#O*Vs9cFRqWM6-MnB%j+>&S=wXL|RXxP7L#L4SgN{CBO|n;TSj?c4r-LG{MwmwW5)
zto)vKVA`u+_xClvJI254>wktKNB#$k&+grvpJB{?uydu`ttq*zP8J(ymIf|XQ9G*b
zIazI?7Q2wtDL>Jkgy0MXx!kw2tJCfsz5lbZ>OaFy#ofG1zUM~$c3C}Hcl(xY_do_Z
zmq~lRyS4ROXSiB)erE38*Z-b$$$vb*d0o!y*;dca?cBUB!&uy@cTLRc%CvQ}CcD_|
z+#p$^kREgMx>@Pjxy9)*N9BKNgA7;xx&MOvhJDv2-@Lc~oyWiYFYIshw=VfszJ88u
z(&P$8%QLK-8Iuyv&Ah}m)6=YB`V8?%L1TFXu`{fVGWs$q8z&1)Ra>}Z(ar+L_aK!y
z2O_3_^8-21>(dYQpVQXZt_wW*ko~sa<JWb+<TndROJDzcQv39S`Ol&@eB0@+-Xfo~
zyMOKP7j<Vma`NN9t6Xt+npG)aK9O0SpGoEI^evx~md_JhBHVP`B=Ffq_Zb|U7fO2g
zH-v3}|M2gh4(St(dpe}z{xeM32eNTu{s-?e?$>oc<a1Kz<i7s*RO$YMX>Y`@{rzBf
z<Fv)?t5YtOn+KkHneu1lT1N9+M)OP|qco@7T3@#_Q`wq5Mf}7XW^V(djT%l3{{KLw
zj;SC6BQqltCpQZ-B=LYtAr@9aHUS}FQH8*uLJ>nFV-v^V#EA<JiYXUubZ`m)CnIJ?
zMtjD8DGF9@O*>VjSM5@B`5<9h31Ue+;yod?<<ZYsHrG;LZELHX$IWDx8X!LHcTYgj
z>EOB?UGdP}@5EAHT}{l6u3M`gch2I;3z;ze@5ggXB3HZUuk5L8oso3LP^#j@j@E@4
z<+i8V&M#kXpSxY>uvlU@M{>5L?7gZjwyW$EQ>>F#tZtg}<jm`?+~sbMC#i5N-WGNF
zc<D}>dEAzz;vHhn+jLYjdHVh{*hbrLJ^GU;BKTJ1k3XyO_RewB(R$%F`+PUot{pq(
zM(f1Ka#$b!wo@mYCpo9Iu<hNSDaXYuEXws)YJcl@jn2Fh-k|o*_c`~ZV;jz?9T%~*
z_CL0E*0<x)-1qVr4}aTocJAZtS9Ek&HLY7^?(pH^<hw^+na|NRPOo_Tm79C@%2khB
zf=|d6+8qD0{{ImMV?hQ6Rz?<fc4k&+fHN>MF|!D=vI#2)2#Fd87A_P~G&BkdPAocj
z(Zs>g84}z~py2-d<h#g~@EFJM6D4kco4=>lp6RVmT~6sbr>#BxZzud=yT9e{<Cj0g
z(=JRk&70EtI%sCD-Vgs;HG?a9Zx>6QPt2IGN^<@Uwfmc|q)du;zNmMhpKZpgrH=Qw
z?dH|B8C=n8^3}7AW1V*Kuk?P;m&sLqsf)hW$IV{3r+i}SywI-a$`T$D9-O(Fwn@@i
z<?9!67RAnT?JHey$l;K~ZV#2Uw`x*n-<Le+W-$GLDVz7vu!{j@DTlOXr-Asq-*?aU
zbvr%#?Tkb5jO7zDqo@8jtzWP&O2_xq&i3tDPZu5etRvsn&3|UkCa$#_``FWRB0JP>
z2dZ}jW{dLVbnI8MF<W$<|0kQZank8b-IY;0W3{#|QoMQhONT~~K+2-iAeQc0wzWzB
z8MOZ&VXzTkU|?ZlV`pP#V}}MZD2$6(1%(WZiUkxDjU6{06c#ZooH#i!>7uBTa!K<h
zlc3;)<bp}yw9UxCX3zBRMNgxPu%2S;O;xG>j6A^tpKVh`%R1c*1P(ZAY;cV|qMUQs
z>_0=!RLR>`t5z+T@^{{`%c~`OSU~dXAGN!pEsQl^=AV1iJ+YR#SZt9hb`hg`%^qbA
zuQ%ZVchs-s3BMBPJJNNjV^4`O+Z`b#&Ya2HrtI1=vxH~sw`AAclv%%nf3aw9@Vm5L
zpsnyYpY%p$tx2KRBfNIM`X*b!CfJh5dn#OHTR~RvwWOlwi^M-={(UVlbLBIsDO+bv
zS*Mxv;L7qfN1mQ{(SLIJ_ccZ1;4`M0*`}H?K{pR>{^^q_;U?IU$PwH1W~RhHnZM3~
zDf`|hPRcspFKoAPdi6=!)=8^233aXB6w0H_amb)Yaq>^DN7K&no+?-2kL={0dQ^9g
z^ptlVa+`(3wY$RK%l&>JP&)JP<p&mhirWG=dRujE+>=vou{yixq}I063C^C**QS+<
zX6mnaWYO~DM~ju8naidvj%|<L>fBQ{degO^!8P$e!%vWOpJLCJPdk=wZc?>1*qrfK
zr6cChrG7*1Df67VC)P66d{z8(`P8Fn=QyT(IdpdMmr07n++H?E^nxn6^jyDie!kLv
z+8-3~s_uW58LUVQXz>($8`k%o>#=6uLG_QKTZ~@KZQIl;IM>U5!LjQrj$L2jc{@At
zkF)OlE&)$N*Ax31?UK3LM0vCXj{h?OxuVglDq;D{Le~@f8n!ALO$OP_s`;{M{?eo3
zO23${w4CzS?)Y({Y?fJE(Iua|N-W!@($2<TRotC_M|#`egFL@i<ji(jZ8g1+X$Om<
zXQq~FbnN^F{e$WEU@CU!-{CP^<B_Xr@Bal<xoZe8Ffy^QvNAFtif6&V#DyOe3?^>;
zD4=NQSa>m^pb?sC8SNGS1swcvr<zGwMbP<X(d+5%7u?SAIrOTzzEk)$bIIX}r`N?6
zy?pz|p-6i+&#ABPv@`m+ml&b4S#M2GIhD24)M0_(ZmTES6BgU3%ZJ-?Fuz>JnR6g!
z@s?o48$~CMrrmg-DAQqoY2uGLY=@<PEj10AwbD1Z&stLSY@5}+2d^G|XWHv5`U;cV
zRC{<+T4}J=%DExuR;7nLy4&?H;>*jZ$1lo_U;m1)4wBeaGmXDqXBk{cs1dhM*R#VK
zqObOgp5y6yc32C-YK~_tRmyPs%I9?<tf?^e!pE4Ie|&^DYHawc$QdGRP&BP+i$F)Y
zz?TM&M=Cp5Y{WZ{8Cc8S=~!6!D3aSTitB>gJ8O_>3_2-Q(I=yluXU|A(7qEkwdo|A
zYjI+On31jSleLDgUBhRHzSH(Pn9{7YbAyAx1RJgk+79!Kzir|7mzgeLQQvVRVY$!C
zGyfUHjsLj4X0WU8Y&z}v@IY#E*JLTz4ABK)jV5vD-CT<ko5e4u#68-Y%=%8I>)By8
z{beWi9?d$&eaDie_0WPgxe2%C$oA|Idf<8Kk(0OKiF~~mhrDh6xH6s4c;MZr$1=;t
zM39013qP!_4^9pOpU>N1q7v0TpAQ~#KX3E-LG1hU^I7U`KA->h`<LzS_cou;>wUi;
z(|kYU^Ld-=cP+ov)b2iS^J%^~^Z6;C*4unOZTIo~U*^{}pU+zz{?B0ZA;$pIbQF>Q
zM;O!v85tNE*;(L?PX<N?K?Oy_#DyC_2m~Iy=ve3wFcFeIK`jP>KL@!MP1at&)4^e0
zj>Kf`^*f$Atxjcnb1x-Z>nPKVx&Iz%Hy)OoF=?53+Xr!Wo@Z_wOaAQBd3pQm<=rJ8
zO0Vx0bM%pYIeE2FX_9vHYzy0o8*7ezy*txtvwE1poY>Y$%ZfbbG@Lu?_?B_STAsrC
zlXJS;3c{c9T)6htcvADqx<ltJ@4P&8)-vz8ulJUNzh9mz)9Di_3s(Jiex`?Ev{UW$
zbKG}Qy%H9Bu2b{=+r3zJw(^12=myrX;`av5TB01Lnx!o~-{<^3lPBxSHkB)E{;K~+
z7&O5Fz{bh~Ykk84VB-e?$H0jf3mX@HbO<>35S&FpwYb2aQ!=yUI!xv}vTt@2n-TDd
zn^(wxLGyM-QKqR;55&BfGtTU+i}0TIiqH9yvejCi_jz*jyQe$$I(&a>^wg?$?cB+W
zk5{}ZIQstarqY~-yN8XIg|M0T`e$x@&^1@-!KKMsscw%?2d5~R3l}OaV|u;vnbtzN
zd1;Rvt}K=cKC|>+T-qbA=%u%2+v_gpo#yg0*P_NLso+Q2+6mRHnMQJ-=evJ!tWjBB
zIrXlL>#Nm~jWhWxnhg2hPE!n(oc&f%;QIWf?VPjU3eH)P{0r2Y(G&nhKN}kpGpu9E
zD9E5_5SaMkqky5~#Dy0XHWoG>MD#rD1^zPxoVk$EGKE7rqeI9%>43-U=y}_1eS2Cw
zQxCZ+d}0vTmVRsEp@N805`m0*Ut2+vhKVXx9_C74bNH-yW>2kp#@{xx=}Fw$#^a8Y
zi`K^+{BYSIrrS|<qn^8vkdlNM@BFSy@ux1upGq|13SjwkzQrS0Y~M@<W0S0Z(<V<S
zcDZDz+tMNNbguB<NnTkiI(}a)JfdBZW+x;tEn4XD*PY@k6Jk~LPWvDJbos-b?kgMg
zCW)smfA}-=N1khQ;zphl?cA_iYqv&cZM#~sV6zTS#oEoUI<-W8ZGUE_)Y)JmnA~o&
za?&#n9i<wWvfNdT9g6m9PvSODJ>U6zz4G_^g1E_0W^G~C?FNDKOSrWI4VL+5-%a_p
z_k!_J5izkFF&0%XnHT1?EUFJtkC>n$)OG6Q+v+v*uJ6)W#L_YI&y~!RCJu$pK3%&U
zoF`gV`bTxDaz0<{k!v0IT7B!gP1SKP+@3V$IacMx)J&J2<;G{Fr!L_2WsmpOLtpOf
z_^D@Z(j!wE<EJq7WySQ>t*0(F8DHAQpLN4_1;;Ip_Ud*Y-?YRHWpyv}6@-M6yIR>;
z9fce&F7=XGxZ;#oEzheb@0=>N6TYo&zO{`%%fLgz=Izeb7c&b#nQQH}mcF>?i_eaq
z(o^H+_CC0_E38XwwVy9@%-5AkI*<P|yfS#QMVoo<%7QHh)4Z8y&s!+X{j*N~Kf}*i
R3@bSfzRvj1aG?JGO#ngx#$5ma

literal 0
HcmV?d00001

diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index bd6ae511d..1efa818d3 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -12,6 +12,7 @@ import OllamaLogo from "@/media/llmprovider/ollama.png";
 import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
 import TogetherAILogo from "@/media/llmprovider/togetherai.png";
+import MistralLogo from "@/media/llmprovider/mistral.jpeg";
 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
@@ -21,9 +22,10 @@ import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
 import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
 import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
 import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
+import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
+import MistralOptions from "@/components/LLMSelection/MistralOptions";
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { MagnifyingGlass } from "@phosphor-icons/react";
-import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
 
 export default function GeneralLLMPreference() {
   const [saving, setSaving] = useState(false);
@@ -134,6 +136,13 @@ export default function GeneralLLMPreference() {
       options: <TogetherAiOptions settings={settings} />,
       description: "Run open source models from Together AI.",
     },
+    {
+      name: "Mistral",
+      value: "mistral",
+      logo: MistralLogo,
+      options: <MistralOptions settings={settings} />,
+      description: "Run open source models from Mistral AI.",
+    },
     {
       name: "Native",
       value: "native",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index 281f1e8cd..3b0046382 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -9,6 +9,7 @@ import OllamaLogo from "@/media/llmprovider/ollama.png";
 import TogetherAILogo from "@/media/llmprovider/togetherai.png";
 import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
+import MistralLogo from "@/media/llmprovider/mistral.jpeg";
 import ChromaLogo from "@/media/vectordbs/chroma.png";
 import PineconeLogo from "@/media/vectordbs/pinecone.png";
 import LanceDbLogo from "@/media/vectordbs/lancedb.png";
@@ -91,6 +92,13 @@ const LLM_SELECTION_PRIVACY = {
     ],
     logo: TogetherAILogo,
   },
+  mistral: {
+    name: "Mistral",
+    description: [
+      "Your prompts and document text used in response creation are visible to Mistral",
+    ],
+    logo: MistralLogo,
+  },
 };
 
 const VECTOR_DB_PRIVACY = {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index dc060594e..9e8ab84a9 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -9,6 +9,7 @@ import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
 import TogetherAILogo from "@/media/llmprovider/togetherai.png";
 import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
+import MistralLogo from "@/media/llmprovider/mistral.jpeg";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
 import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
@@ -17,6 +18,7 @@ import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
 import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
 import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
 import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
+import MistralOptions from "@/components/LLMSelection/MistralOptions";
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import System from "@/models/system";
 import paths from "@/utils/paths";
@@ -109,6 +111,13 @@ export default function LLMPreference({
       options: <TogetherAiOptions settings={settings} />,
       description: "Run open source models from Together AI.",
     },
+    {
+      name: "Mistral",
+      value: "mistral",
+      logo: MistralLogo,
+      options: <MistralOptions settings={settings} />,
+      description: "Run open source models from Mistral AI.",
+    },
     {
       name: "Native",
       value: "native",
diff --git a/server/.env.example b/server/.env.example
index d060e0ab5..26c51927c 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -41,6 +41,10 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
 # TOGETHER_AI_API_KEY='my-together-ai-key'
 # TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'
 
+# LLM_PROVIDER='mistral'
+# MISTRAL_API_KEY='example-mistral-ai-api-key'
+# MISTRAL_MODEL_PREF='mistral-tiny'
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index cd008d420..53d42f2e2 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -159,6 +159,18 @@ const SystemSettings = {
             AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
           }
         : {}),
+      ...(llmProvider === "mistral"
+        ? {
+            MistralApiKey: !!process.env.MISTRAL_API_KEY,
+            MistralModelPref: process.env.MISTRAL_MODEL_PREF,
+
+            // For embedding credentials when mistral is selected.
+            OpenAiKey: !!process.env.OPEN_AI_KEY,
+            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
+            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
+            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
+          }
+        : {}),
       ...(llmProvider === "native"
         ? {
             NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF,
diff --git a/server/utils/AiProviders/anthropic/index.js b/server/utils/AiProviders/anthropic/index.js
index 17f2abc4a..56d3a80f0 100644
--- a/server/utils/AiProviders/anthropic/index.js
+++ b/server/utils/AiProviders/anthropic/index.js
@@ -26,6 +26,7 @@ class AnthropicLLM {
       );
     this.embedder = embedder;
     this.answerKey = v4().split("-")[0];
+    this.defaultTemp = 0.7;
   }
 
   streamingEnabled() {
diff --git a/server/utils/AiProviders/azureOpenAi/index.js b/server/utils/AiProviders/azureOpenAi/index.js
index f59fc51fa..639ac102e 100644
--- a/server/utils/AiProviders/azureOpenAi/index.js
+++ b/server/utils/AiProviders/azureOpenAi/index.js
@@ -25,6 +25,7 @@ class AzureOpenAiLLM {
         "No embedding provider defined for AzureOpenAiLLM - falling back to AzureOpenAiEmbedder for embedding!"
       );
     this.embedder = !embedder ? new AzureOpenAiEmbedder() : embedder;
+    this.defaultTemp = 0.7;
   }
 
   #appendContext(contextTexts = []) {
@@ -93,7 +94,7 @@ class AzureOpenAiLLM {
     );
     const textResponse = await this.openai
       .getChatCompletions(this.model, messages, {
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
       })
       .then((res) => {
@@ -130,7 +131,7 @@ class AzureOpenAiLLM {
       this.model,
       messages,
       {
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
       }
     );
diff --git a/server/utils/AiProviders/gemini/index.js b/server/utils/AiProviders/gemini/index.js
index 348c8f5ed..63549fb8d 100644
--- a/server/utils/AiProviders/gemini/index.js
+++ b/server/utils/AiProviders/gemini/index.js
@@ -22,6 +22,7 @@ class GeminiLLM {
         "INVALID GEMINI LLM SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Gemini as your LLM."
       );
     this.embedder = embedder;
+    this.defaultTemp = 0.7; // not used for Gemini
   }
 
   #appendContext(contextTexts = []) {
diff --git a/server/utils/AiProviders/lmStudio/index.js b/server/utils/AiProviders/lmStudio/index.js
index 614808034..08950a7b9 100644
--- a/server/utils/AiProviders/lmStudio/index.js
+++ b/server/utils/AiProviders/lmStudio/index.js
@@ -25,6 +25,7 @@ class LMStudioLLM {
         "INVALID LM STUDIO SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LMStudio as your LLM."
       );
     this.embedder = embedder;
+    this.defaultTemp = 0.7;
   }
 
   #appendContext(contextTexts = []) {
@@ -85,7 +86,7 @@ class LMStudioLLM {
     const textResponse = await this.lmstudio
       .createChatCompletion({
         model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
@@ -122,7 +123,7 @@ class LMStudioLLM {
     const streamRequest = await this.lmstudio.createChatCompletion(
       {
         model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         stream: true,
         messages: await this.compressMessages(
diff --git a/server/utils/AiProviders/localAi/index.js b/server/utils/AiProviders/localAi/index.js
index 6623ac88e..6d265cf82 100644
--- a/server/utils/AiProviders/localAi/index.js
+++ b/server/utils/AiProviders/localAi/index.js
@@ -27,6 +27,7 @@ class LocalAiLLM {
         "INVALID LOCAL AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LocalAI as your LLM."
       );
     this.embedder = embedder;
+    this.defaultTemp = 0.7;
   }
 
   #appendContext(contextTexts = []) {
@@ -85,7 +86,7 @@ class LocalAiLLM {
     const textResponse = await this.openai
       .createChatCompletion({
         model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
@@ -123,7 +124,7 @@ class LocalAiLLM {
       {
         model: this.model,
         stream: true,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
diff --git a/server/utils/AiProviders/mistral/index.js b/server/utils/AiProviders/mistral/index.js
new file mode 100644
index 000000000..a25185c76
--- /dev/null
+++ b/server/utils/AiProviders/mistral/index.js
@@ -0,0 +1,184 @@
+const { chatPrompt } = require("../../chats");
+
+class MistralLLM {
+  constructor(embedder = null, modelPreference = null) {
+    const { Configuration, OpenAIApi } = require("openai");
+    if (!process.env.MISTRAL_API_KEY)
+      throw new Error("No Mistral API key was set.");
+
+    const config = new Configuration({
+      basePath: "https://api.mistral.ai/v1",
+      apiKey: process.env.MISTRAL_API_KEY,
+    });
+    this.openai = new OpenAIApi(config);
+    this.model =
+      modelPreference || process.env.MISTRAL_MODEL_PREF || "mistral-tiny";
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    if (!embedder)
+      console.warn(
+        "No embedding provider defined for MistralLLM - falling back to OpenAiEmbedder for embedding!"
+      );
+    this.embedder = embedder;
+    this.defaultTemp = 0.0; // recommended default for Mistral; other providers keep 0.7
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
+  streamingEnabled() {
+    return "streamChat" in this && "streamGetChatCompletion" in this;
+  }
+
+  promptWindowLimit() {
+    return 32000; // treat all Mistral chat models as having a 32k-token context window
+  }
+
+  async isValidChatCompletionModel(modelName = "") {
+    return true; // no static allowlist; model ids are passed through to the Mistral API as-is
+  }
+
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+  }
+
+  async isSafe(_ = "") {
+    return { safe: true, reasons: [] };
+  }
+
+  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `Mistral chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const textResponse = await this.openai
+      .createChatCompletion({
+        model: this.model,
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
+        messages: await this.compressMessages(
+          {
+            systemPrompt: chatPrompt(workspace),
+            userPrompt: prompt,
+            chatHistory,
+          },
+          rawHistory
+        ),
+      })
+      .then((json) => {
+        const res = json.data;
+        if (!res.hasOwnProperty("choices"))
+          throw new Error("Mistral chat: No results!");
+        if (res.choices.length === 0)
+          throw new Error("Mistral chat: No results length!");
+        return res.choices[0].message.content;
+      })
+      .catch((error) => {
+        throw new Error(
+          `Mistral::createChatCompletion failed with: ${error.message}`
+        );
+      });
+
+    return textResponse;
+  }
+
+  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `Mistral chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const streamRequest = await this.openai.createChatCompletion(
+      {
+        model: this.model,
+        stream: true,
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
+        messages: await this.compressMessages(
+          {
+            systemPrompt: chatPrompt(workspace),
+            userPrompt: prompt,
+            chatHistory,
+          },
+          rawHistory
+        ),
+      },
+      { responseType: "stream" }
+    );
+
+    return streamRequest;
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `Mistral chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const { data } = await this.openai.createChatCompletion({
+      model: this.model,
+      messages,
+      temperature,
+    });
+
+    if (!data.hasOwnProperty("choices")) return null;
+    return data.choices[0].message.content;
+  }
+
+  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `Mistral chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const streamRequest = await this.openai.createChatCompletion(
+      {
+        model: this.model,
+        stream: true,
+        messages,
+        temperature,
+      },
+      { responseType: "stream" }
+    );
+    return streamRequest;
+  }
+
+  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+module.exports = {
+  MistralLLM,
+};
diff --git a/server/utils/AiProviders/native/index.js b/server/utils/AiProviders/native/index.js
index 66cc84d0c..fff904c46 100644
--- a/server/utils/AiProviders/native/index.js
+++ b/server/utils/AiProviders/native/index.js
@@ -29,6 +29,7 @@ class NativeLLM {
 
     // Make directory when it does not exist in existing installations
     if (!fs.existsSync(this.cacheDir)) fs.mkdirSync(this.cacheDir);
+    this.defaultTemp = 0.7;
   }
 
   async #initializeLlamaModel(temperature = 0.7) {
@@ -132,7 +133,7 @@ class NativeLLM {
       );
 
       const model = await this.#llamaClient({
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
       });
       const response = await model.call(messages);
       return response.content;
@@ -145,7 +146,7 @@ class NativeLLM {
 
   async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
     const model = await this.#llamaClient({
-      temperature: Number(workspace?.openAiTemp ?? 0.7),
+      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
     });
     const messages = await this.compressMessages(
       {
diff --git a/server/utils/AiProviders/ollama/index.js b/server/utils/AiProviders/ollama/index.js
index fce96f369..af7fe8210 100644
--- a/server/utils/AiProviders/ollama/index.js
+++ b/server/utils/AiProviders/ollama/index.js
@@ -20,6 +20,7 @@ class OllamaAILLM {
         "INVALID OLLAMA SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Ollama as your LLM."
       );
     this.embedder = embedder;
+    this.defaultTemp = 0.7;
   }
 
   #ollamaClient({ temperature = 0.07 }) {
@@ -113,7 +114,7 @@ class OllamaAILLM {
     );
 
     const model = this.#ollamaClient({
-      temperature: Number(workspace?.openAiTemp ?? 0.7),
+      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
     });
     const textResponse = await model
       .pipe(new StringOutputParser())
@@ -136,7 +137,7 @@ class OllamaAILLM {
     );
 
     const model = this.#ollamaClient({
-      temperature: Number(workspace?.openAiTemp ?? 0.7),
+      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
     });
     const stream = await model
       .pipe(new StringOutputParser())
diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js
index 038d201d1..582bc054d 100644
--- a/server/utils/AiProviders/openAi/index.js
+++ b/server/utils/AiProviders/openAi/index.js
@@ -23,6 +23,7 @@ class OpenAiLLM {
         "No embedding provider defined for OpenAiLLM - falling back to OpenAiEmbedder for embedding!"
       );
     this.embedder = !embedder ? new OpenAiEmbedder() : embedder;
+    this.defaultTemp = 0.7;
   }
 
   #appendContext(contextTexts = []) {
@@ -127,7 +128,7 @@ class OpenAiLLM {
     const textResponse = await this.openai
       .createChatCompletion({
         model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
@@ -165,7 +166,7 @@ class OpenAiLLM {
       {
         model: this.model,
         stream: true,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
diff --git a/server/utils/AiProviders/togetherAi/index.js b/server/utils/AiProviders/togetherAi/index.js
index 44061dd0a..341661f8d 100644
--- a/server/utils/AiProviders/togetherAi/index.js
+++ b/server/utils/AiProviders/togetherAi/index.js
@@ -28,6 +28,7 @@ class TogetherAiLLM {
         "INVALID TOGETHER AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Together AI as your LLM."
       );
     this.embedder = embedder;
+    this.defaultTemp = 0.7;
   }
 
   #appendContext(contextTexts = []) {
@@ -89,7 +90,7 @@ class TogetherAiLLM {
     const textResponse = await this.openai
       .createChatCompletion({
         model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
@@ -127,7 +128,7 @@ class TogetherAiLLM {
       {
         model: this.model,
         stream: true,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
diff --git a/server/utils/chats/index.js b/server/utils/chats/index.js
index d63de47d5..764c7795a 100644
--- a/server/utils/chats/index.js
+++ b/server/utils/chats/index.js
@@ -171,7 +171,7 @@ async function chatWithWorkspace(
 
   // Send the text completion.
   const textResponse = await LLMConnector.getChatCompletion(messages, {
-    temperature: workspace?.openAiTemp ?? 0.7,
+    temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
   });
 
   if (!textResponse) {
diff --git a/server/utils/chats/stream.js b/server/utils/chats/stream.js
index ceea8d7d2..cff565ed6 100644
--- a/server/utils/chats/stream.js
+++ b/server/utils/chats/stream.js
@@ -141,7 +141,7 @@ async function streamChatWithWorkspace(
       `\x1b[31m[STREAMING DISABLED]\x1b[0m Streaming is not available for ${LLMConnector.constructor.name}. Will use regular chat method.`
     );
     completeText = await LLMConnector.getChatCompletion(messages, {
-      temperature: workspace?.openAiTemp ?? 0.7,
+      temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
     });
     writeResponseChunk(response, {
       uuid,
@@ -153,7 +153,7 @@ async function streamChatWithWorkspace(
     });
   } else {
     const stream = await LLMConnector.streamGetChatCompletion(messages, {
-      temperature: workspace?.openAiTemp ?? 0.7,
+      temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
     });
     completeText = await handleStreamResponses(response, stream, {
       uuid,
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index 87fe976ec..53c641e75 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -5,6 +5,7 @@ const SUPPORT_CUSTOM_MODELS = [
   "ollama",
   "native-llm",
   "togetherai",
+  "mistral",
 ];
 
 async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -20,6 +21,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
       return await ollamaAIModels(basePath);
     case "togetherai":
       return await getTogetherAiModels();
+    case "mistral":
+      return await getMistralModels(apiKey);
     case "native-llm":
       return nativeLLMModels();
     default:
@@ -117,6 +120,26 @@ async function getTogetherAiModels() {
   return { models, error: null };
 }
 
+async function getMistralModels(apiKey = null) {
+  const { Configuration, OpenAIApi } = require("openai");
+  const config = new Configuration({
+    apiKey: apiKey || process.env.MISTRAL_API_KEY,
+    basePath: "https://api.mistral.ai/v1",
+  });
+  const openai = new OpenAIApi(config);
+  const models = await openai
+    .listModels()
+    .then((res) => res.data.data.filter((model) => !model.id.includes("embed")))
+    .catch((e) => {
+      console.error(`Mistral:listModels`, e.message);
+      return [];
+    });
+
+  // The API key worked, so save it for future use.
+  if (models.length > 0 && !!apiKey) process.env.MISTRAL_API_KEY = apiKey;
+  return { models, error: null };
+}
+
 function nativeLLMModels() {
   const fs = require("fs");
   const path = require("path");
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 2b1f3dacf..2eed9057c 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -52,6 +52,9 @@ function getLLMProvider(modelPreference = null) {
     case "togetherai":
       const { TogetherAiLLM } = require("../AiProviders/togetherAi");
       return new TogetherAiLLM(embedder, modelPreference);
+    case "mistral":
+      const { MistralLLM } = require("../AiProviders/mistral");
+      return new MistralLLM(embedder, modelPreference);
     case "native":
       const { NativeLLM } = require("../AiProviders/native");
       return new NativeLLM(embedder, modelPreference);
@@ -76,6 +79,7 @@ function getEmbeddingEngineSelection() {
       return new LocalAiEmbedder();
     case "native":
       const { NativeEmbedder } = require("../EmbeddingEngines/native");
+      console.log("\x1b[34m[INFO]\x1b[0m Using Native Embedder");
       return new NativeEmbedder();
     default:
       return null;
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 5c43da519..54e684029 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -95,6 +95,15 @@ const KEY_MAPPING = {
     checks: [nonZero],
   },
 
+  MistralApiKey: {
+    envKey: "MISTRAL_API_KEY",
+    checks: [isNotEmpty],
+  },
+  MistralModelPref: {
+    envKey: "MISTRAL_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
+
   // Native LLM Settings
   NativeLLMModelPref: {
     envKey: "NATIVE_LLM_MODEL_PREF",
@@ -268,6 +277,7 @@ function supportedLLM(input = "") {
     "ollama",
     "native",
     "togetherai",
+    "mistral",
   ].includes(input);
   return validSelection ? null : `${input} is not a valid LLM provider.`;
 }
-- 
GitLab