diff --git a/frontend/src/components/LLMSelection/NvidiaNimOptions/remote.jsx b/frontend/src/components/LLMSelection/NvidiaNimOptions/remote.jsx
index f1fa4153d9875a643ba3f17a63f3b1163c92a923..9237cd79c6f30108260ae22ffb0fed885dd3ba62 100644
--- a/frontend/src/components/LLMSelection/NvidiaNimOptions/remote.jsx
+++ b/frontend/src/components/LLMSelection/NvidiaNimOptions/remote.jsx
@@ -5,7 +5,7 @@ import { NVIDIA_NIM_COMMON_URLS } from "@/utils/constants";
 import { useState, useEffect } from "react";
 
 /**
- * This component is used to select a remote Nvidia NIM model endpoint
+ * This component is used to select a remote NVIDIA NIM model endpoint
  * This is the default component and way to connect to NVIDIA NIM
  * as the "managed" provider can only work in the Desktop context.
  */
@@ -26,7 +26,7 @@ export default function RemoteNvidiaNimOptions({ settings }) {
       <div className="flex flex-col w-60">
         <div className="flex justify-between items-center mb-2">
           <label className="text-white text-sm font-semibold">
-            Nvidia Nim Base URL
+            NVIDIA NIM Base URL
           </label>
           {loading ? (
             <PreLoader size="6" />
@@ -56,7 +56,7 @@ export default function RemoteNvidiaNimOptions({ settings }) {
           onBlur={basePath.onBlur}
         />
         <p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
-          Enter the URL where Nvidia NIM is running.
+          Enter the URL where NVIDIA NIM is running.
         </p>
       </div>
       {!settings?.credentialsOnly && (
diff --git a/frontend/src/media/llmprovider/nvidia-nim.png b/frontend/src/media/llmprovider/nvidia-nim.png
index cdec289cf6f28b4a6cb62c868525ed2f9a7d740f..f26e5979f8612bc7120c40d41d97f8c6c226a710 100644
Binary files a/frontend/src/media/llmprovider/nvidia-nim.png and b/frontend/src/media/llmprovider/nvidia-nim.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index 9e8a049e68bc9e4cd99b88c3486d9a51235dd79e..96ec360fd777562d720bbcf1831c12491dda4d48 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -96,12 +96,12 @@ export const AVAILABLE_LLM_PROVIDERS = [
     requiredConfig: ["GeminiLLMApiKey"],
   },
   {
-    name: "Nvidia NIM",
+    name: "NVIDIA NIM",
     value: "nvidia-nim",
     logo: NvidiaNimLogo,
     options: (settings) => <NvidiaNimOptions settings={settings} />,
     description:
-      "Run full parameter LLMs directly on your GPU using Nvidia's inference microservice via Docker.",
+      "Run full parameter LLMs directly on your NVIDIA RTX GPU using NVIDIA NIM.",
     requiredConfig: ["NvidiaNimLLMBasePath"],
   },
   {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index 3121d56e075f499655fabab227e8e2ad374133ba..27e7102ea9fa765b37e3e1035ca36b3e9bc81325 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -78,9 +78,9 @@ export const LLM_SELECTION_PRIVACY = {
     logo: GeminiLogo,
   },
   "nvidia-nim": {
-    name: "Nvidia NIM",
+    name: "NVIDIA NIM",
     description: [
-      "Your model and chats are only accessible on the machine running the Nvidia NIM service",
+      "Your model and chats are only accessible on the machine running the NVIDIA NIM",
     ],
     logo: NvidiaNimLogo,
   },
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index ebbefde62bba6c7b344c8df3ddf01926854fb6ec..abd4d4520cbe3d2fc4fbb4cf78bd9cb4b25eaa00 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -92,12 +92,12 @@ const LLMS = [
     description: "Google's largest and most capable AI model",
   },
   {
-    name: "Nvidia NIM",
+    name: "NVIDIA NIM",
     value: "nvidia-nim",
     logo: NvidiaNimLogo,
     options: (settings) => <NvidiaNimOptions settings={settings} />,
     description:
-      "Run full parameter LLMs directly on your GPU using Nvidia's inference microservice via Docker.",
+      "Run full parameter LLMs directly on your NVIDIA RTX GPU using NVIDIA NIM.",
   },
   {
     name: "HuggingFace",
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index 4a862109fb046a3d5b78c7ad454621fb14186c2a..42ce8723bc78318b5a5a60acab07b56c4e02f4e6 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -554,7 +554,7 @@ const SystemSettings = {
       XAIApiKey: !!process.env.XAI_LLM_API_KEY,
       XAIModelPref: process.env.XAI_LLM_MODEL_PREF,
 
-      // Nvidia NIM Keys
+      // NVIDIA NIM Keys
       NvidiaNimLLMBasePath: process.env.NVIDIA_NIM_LLM_BASE_PATH,
       NvidiaNimLLMModelPref: process.env.NVIDIA_NIM_LLM_MODEL_PREF,
       NvidiaNimLLMTokenLimit: process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT,
diff --git a/server/utils/AiProviders/nvidiaNim/index.js b/server/utils/AiProviders/nvidiaNim/index.js
index 4de408e98cb0cb1dd990c3a7b7fd1869ff2733fe..b421fdc15ba9a9fdb0080f9334a9ddcf84c4288a 100644
--- a/server/utils/AiProviders/nvidiaNim/index.js
+++ b/server/utils/AiProviders/nvidiaNim/index.js
@@ -10,7 +10,7 @@ const {
 class NvidiaNimLLM {
   constructor(embedder = null, modelPreference = null) {
     if (!process.env.NVIDIA_NIM_LLM_BASE_PATH)
-      throw new Error("No Nvidia NIM API Base Path was set.");
+      throw new Error("No NVIDIA NIM API Base Path was set.");
 
     const { OpenAI: OpenAIApi } = require("openai");
     this.nvidiaNim = new OpenAIApi({
@@ -85,7 +85,7 @@ class NvidiaNimLLM {
   static promptWindowLimit(_modelName) {
     const limit = process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT || 4096;
     if (!limit || isNaN(Number(limit)))
-      throw new Error("No Nvidia NIM token context limit was set.");
+      throw new Error("No NVIDIA NIM token context limit was set.");
     return Number(limit);
   }
 
@@ -94,7 +94,7 @@ class NvidiaNimLLM {
   promptWindowLimit() {
     const limit = process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT || 4096;
     if (!limit || isNaN(Number(limit)))
-      throw new Error("No Nvidia NIM token context limit was set.");
+      throw new Error("No NVIDIA NIM token context limit was set.");
     return Number(limit);
   }
 
@@ -154,7 +154,7 @@ class NvidiaNimLLM {
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!this.model)
       throw new Error(
-        `Nvidia NIM chat: ${this.model} is not valid or defined model for chat completion!`
+        `NVIDIA NIM chat: ${this.model} is not a valid or defined model for chat completion!`
       );
 
     const result = await LLMPerformanceMonitor.measureAsyncFunction(
@@ -190,7 +190,7 @@ class NvidiaNimLLM {
   async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!this.model)
       throw new Error(
-        `Nvidia NIM chat: ${this.model} is not valid or defined model for chat completion!`
+        `NVIDIA NIM chat: ${this.model} is not a valid or defined model for chat completion!`
       );
 
     const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
diff --git a/server/utils/agents/aibitat/providers/nvidiaNim.js b/server/utils/agents/aibitat/providers/nvidiaNim.js
index 50132abcb50f881924d39edbd75ff3a773d8a17a..6e1bc6959f5b026da2a08d5cd46e673f8c954a90 100644
--- a/server/utils/agents/aibitat/providers/nvidiaNim.js
+++ b/server/utils/agents/aibitat/providers/nvidiaNim.js
@@ -37,9 +37,9 @@ class NvidiaNimProvider extends InheritMultiple([Provider, UnTooled]) {
       })
       .then((result) => {
         if (!result.hasOwnProperty("choices"))
-          throw new Error("Nvidia NIM chat: No results!");
+          throw new Error("NVIDIA NIM chat: No results!");
         if (result.choices.length === 0)
-          throw new Error("Nvidia NIM chat: No results length!");
+          throw new Error("NVIDIA NIM chat: No results length!");
         return result.choices[0].message.content;
       })
       .catch((_) => {
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index eb6b89a838a2672b367102bba504590afcf2fbc1..d4bf908bedcafa86b3bd1e59260719803ae00ba6 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -180,7 +180,7 @@ class AgentHandler {
       case "nvidia-nim":
         if (!process.env.NVIDIA_NIM_LLM_BASE_PATH)
           throw new Error(
-            "Nvidia NIM base path must be provided to use agents."
+            "NVIDIA NIM base path must be provided to use agents."
           );
         break;
 
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index 4e4a56715e47caf468c75ee72364e4cc5069aa3c..b3829eb4c1c79ba3ac84631f9a67ce3e953e49db 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -550,8 +550,8 @@ async function getNvidiaNimModels(basePath = null) {
 
     return { models, error: null };
   } catch (e) {
-    console.error(`Nvidia NIM:getNvidiaNimModels`, e.message);
-    return { models: [], error: "Could not fetch Nvidia NIM Models" };
+    console.error(`NVIDIA NIM:getNvidiaNimModels`, e.message);
+    return { models: [], error: "Could not fetch NVIDIA NIM Models" };
   }
 }