diff --git a/server/utils/AiProviders/azureOpenAi/index.js b/server/utils/AiProviders/azureOpenAi/index.js
index 82e28204bca631e36bcb8499d8906d0bb96d2515..83ac3c4cd52b0c356fe08d9eb8ef4570c3514ab5 100644
--- a/server/utils/AiProviders/azureOpenAi/index.js
+++ b/server/utils/AiProviders/azureOpenAi/index.js
@@ -27,6 +27,18 @@ class AzureOpenAiLLM {
     this.embedder = !embedder ? new AzureOpenAiEmbedder() : embedder;
   }
 
+  #appendContext(contextTexts = []) {
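+    // Builds the "Context" block appended to the system prompt, e.g.
+    // "\nContext:\n[CONTEXT 0]:\n<snippet>\n[END CONTEXT 0]\n\n" for a single
+    // context text; returns an empty string when there is nothing to append.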
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
   streamingEnabled() {
     return "streamChat" in this && "streamGetChatCompletion" in this;
   }
@@ -55,13 +67,7 @@ class AzureOpenAiLLM {
   }) {
     const prompt = {
       role: "system",
-      content: `${systemPrompt}
-Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
     };
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
diff --git a/server/utils/AiProviders/gemini/index.js b/server/utils/AiProviders/gemini/index.js
index d0a76c550abe7586e8ff162fe089010b6df1231c..03388e3e203ebc29804f622ae51f3c1216c89068 100644
--- a/server/utils/AiProviders/gemini/index.js
+++ b/server/utils/AiProviders/gemini/index.js
@@ -1,4 +1,3 @@
-const { v4 } = require("uuid");
 const { chatPrompt } = require("../../chats");
 
 class GeminiLLM {
@@ -22,7 +21,18 @@ class GeminiLLM {
         "INVALID GEMINI LLM SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Gemini as your LLM."
       );
     this.embedder = embedder;
-    this.answerKey = v4().split("-")[0];
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
   }
 
   streamingEnabled() {
@@ -57,13 +67,7 @@ class GeminiLLM {
   }) {
     const prompt = {
       role: "system",
-      content: `${systemPrompt}
-Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
     };
     return [
       prompt,
diff --git a/server/utils/AiProviders/lmStudio/index.js b/server/utils/AiProviders/lmStudio/index.js
index 4d9770e6651e1dd38ac21cfa61cd6d04552ee28a..28c107df0804f66b7286f68e3558513792d33313 100644
--- a/server/utils/AiProviders/lmStudio/index.js
+++ b/server/utils/AiProviders/lmStudio/index.js
@@ -27,6 +27,18 @@ class LMStudioLLM {
     this.embedder = embedder;
   }
 
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
   streamingEnabled() {
     return "streamChat" in this && "streamGetChatCompletion" in this;
   }
@@ -54,13 +66,7 @@ class LMStudioLLM {
   }) {
     const prompt = {
       role: "system",
-      content: `${systemPrompt}
-Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
     };
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
diff --git a/server/utils/AiProviders/localAi/index.js b/server/utils/AiProviders/localAi/index.js
index 6c7a3263fb1bea34385fcc566275bbe1c28fdaa1..84954c99427888a84e1b6fb538f7068eab83878b 100644
--- a/server/utils/AiProviders/localAi/index.js
+++ b/server/utils/AiProviders/localAi/index.js
@@ -29,6 +29,18 @@ class LocalAiLLM {
     this.embedder = embedder;
   }
 
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
   streamingEnabled() {
     return "streamChat" in this && "streamGetChatCompletion" in this;
   }
@@ -54,13 +66,7 @@ class LocalAiLLM {
   }) {
     const prompt = {
       role: "system",
-      content: `${systemPrompt}
-Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
     };
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
diff --git a/server/utils/AiProviders/native/index.js b/server/utils/AiProviders/native/index.js
index 89589d112ef2166e4b2225851245140102e504ba..faac4fa030723ce85f0fea60ee30e52036d5c9a8 100644
--- a/server/utils/AiProviders/native/index.js
+++ b/server/utils/AiProviders/native/index.js
@@ -1,8 +1,6 @@
-const os = require("os");
 const fs = require("fs");
 const path = require("path");
 const { NativeEmbedder } = require("../../EmbeddingEngines/native");
-const { HumanMessage, SystemMessage, AIMessage } = require("langchain/schema");
 const { chatPrompt } = require("../../chats");
 
 // Docs: https://api.js.langchain.com/classes/chat_models_llama_cpp.ChatLlamaCpp.html
@@ -29,12 +27,6 @@ class NativeLLM {
         : path.resolve(__dirname, `../../../storage/models/downloaded`)
     );
 
-    // Set ENV for if llama.cpp needs to rebuild at runtime and machine is not
-    // running Apple Silicon.
-    process.env.NODE_LLAMA_CPP_METAL = os
-      .cpus()
-      .some((cpu) => cpu.model.includes("Apple"));
-
     // Create the cache directory if it does not already exist in existing installations
     if (!fs.existsSync(this.cacheDir)) fs.mkdirSync(this.cacheDir);
   }
@@ -56,12 +48,46 @@ class NativeLLM {
   // If the model has been loaded once, it is already in memory,
   // so we can skip re-loading it and instead go straight to inference.
   // Note: this will break temperature settings when hopping between workspaces with different temps.
-  async llamaClient({ temperature = 0.7 }) {
+  async #llamaClient({ temperature = 0.7 }) {
     if (global.llamaModelInstance) return global.llamaModelInstance;
     await this.#initializeLlamaModel(temperature);
     return global.llamaModelInstance;
   }
 
+  #convertToLangchainPrototypes(chats = []) {
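+    // Convert plain { role, content } chat objects into the corresponding
+    // LangChain message classes; entries with an unrecognized role are skipped.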
+    const {
+      HumanMessage,
+      SystemMessage,
+      AIMessage,
+    } = require("langchain/schema");
+    const langchainChats = [];
+    const roleToMessageMap = {
+      system: SystemMessage,
+      user: HumanMessage,
+      assistant: AIMessage,
+    };
+
+    for (const chat of chats) {
+      if (!roleToMessageMap.hasOwnProperty(chat.role)) continue;
+      const MessageClass = roleToMessageMap[chat.role];
+      langchainChats.push(new MessageClass({ content: chat.content }));
+    }
+
+    return langchainChats;
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
   streamingEnabled() {
     return "streamChat" in this && "streamGetChatCompletion" in this;
   }
@@ -84,13 +110,7 @@ class NativeLLM {
   }) {
     const prompt = {
       role: "system",
-      content: `${systemPrompt}
-Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
     };
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
@@ -111,7 +131,7 @@ Context:
         rawHistory
       );
 
-      const model = await this.llamaClient({
+      const model = await this.#llamaClient({
         temperature: Number(workspace?.openAiTemp ?? 0.7),
       });
       const response = await model.call(messages);
@@ -124,7 +144,7 @@ Context:
   }
 
   async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const model = await this.llamaClient({
+    const model = await this.#llamaClient({
       temperature: Number(workspace?.openAiTemp ?? 0.7),
     });
     const messages = await this.compressMessages(
@@ -140,13 +160,13 @@ Context:
   }
 
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
-    const model = await this.llamaClient({ temperature });
+    const model = await this.#llamaClient({ temperature });
     const response = await model.call(messages);
     return response.content;
   }
 
   async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
-    const model = await this.llamaClient({ temperature });
+    const model = await this.#llamaClient({ temperature });
     const responseStream = await model.stream(messages);
     return responseStream;
   }
@@ -167,27 +187,7 @@ Context:
       messageArray,
       rawHistory
     );
-    return this.convertToLangchainPrototypes(compressedMessages);
-  }
-
-  convertToLangchainPrototypes(chats = []) {
-    const langchainChats = [];
-    for (const chat of chats) {
-      switch (chat.role) {
-        case "system":
-          langchainChats.push(new SystemMessage({ content: chat.content }));
-          break;
-        case "user":
-          langchainChats.push(new HumanMessage({ content: chat.content }));
-          break;
-        case "assistant":
-          langchainChats.push(new AIMessage({ content: chat.content }));
-          break;
-        default:
-          break;
-      }
-    }
-    return langchainChats;
+    return this.#convertToLangchainPrototypes(compressedMessages);
   }
 }
 
diff --git a/server/utils/AiProviders/ollama/index.js b/server/utils/AiProviders/ollama/index.js
index f160e5d36f9ebc8bcf53003b7039eeda1de8e582..55205c23d9f4b8dc64e2ce3dc64933d0aee67f0b 100644
--- a/server/utils/AiProviders/ollama/index.js
+++ b/server/utils/AiProviders/ollama/index.js
@@ -40,24 +40,33 @@ class OllamaAILLM {
       AIMessage,
     } = require("langchain/schema");
     const langchainChats = [];
+    const roleToMessageMap = {
+      system: SystemMessage,
+      user: HumanMessage,
+      assistant: AIMessage,
+    };
+
     for (const chat of chats) {
-      switch (chat.role) {
-        case "system":
-          langchainChats.push(new SystemMessage({ content: chat.content }));
-          break;
-        case "user":
-          langchainChats.push(new HumanMessage({ content: chat.content }));
-          break;
-        case "assistant":
-          langchainChats.push(new AIMessage({ content: chat.content }));
-          break;
-        default:
-          break;
-      }
+      if (!roleToMessageMap.hasOwnProperty(chat.role)) continue;
+      const MessageClass = roleToMessageMap[chat.role];
+      langchainChats.push(new MessageClass({ content: chat.content }));
     }
+
     return langchainChats;
   }
 
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
   streamingEnabled() {
     return "streamChat" in this && "streamGetChatCompletion" in this;
   }
@@ -83,13 +92,7 @@ class OllamaAILLM {
   }) {
     const prompt = {
       role: "system",
-      content: `${systemPrompt}
-Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
     };
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js
index 46464271968244e378b28b1b1d7cf3045eb8cc21..ccc7ba0e9b1ffde32e31d94550f7690da88503c6 100644
--- a/server/utils/AiProviders/openAi/index.js
+++ b/server/utils/AiProviders/openAi/index.js
@@ -24,6 +24,18 @@ class OpenAiLLM {
     this.embedder = !embedder ? new OpenAiEmbedder() : embedder;
   }
 
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
   streamingEnabled() {
     return "streamChat" in this && "streamGetChatCompletion" in this;
   }
@@ -68,13 +80,7 @@ class OpenAiLLM {
   }) {
     const prompt = {
       role: "system",
-      content: `${systemPrompt}
-Context:
-    ${contextTexts
-      .map((text, i) => {
-        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-      })
-      .join("")}`,
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
     };
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }