diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
index 270f22ef94fbe56494a79d8bca4c17f2559e1ac0..4e0a9592c14b5216d3a502a617cac6c2cc497b87 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
@@ -1,7 +1,9 @@
 import useGetProviderModels, {
   DISABLED_PROVIDERS,
 } from "@/hooks/useGetProvidersModels";
+import paths from "@/utils/paths";
 import { useTranslation } from "react-i18next";
+import { Link, useParams } from "react-router-dom";
 
 // These models do NOT support function calling
 function supportedModel(provider, model = "") {
@@ -18,11 +20,33 @@ export default function AgentModelSelection({
   workspace,
   setHasChanges,
 }) {
+  const { slug } = useParams();
   const { defaultModels, customModels, loading } =
     useGetProviderModels(provider);
 
   const { t } = useTranslation();
-  if (DISABLED_PROVIDERS.includes(provider)) return null;
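+  // Providers without multi-model support render a notice instead of a model selector.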
+  if (DISABLED_PROVIDERS.includes(provider)) {
+    return (
+      <div className="w-full h-10 justify-center items-center flex">
+        <p className="text-sm font-base text-white text-opacity-60 text-center">
+          Multi-model support is not available for this provider yet.
+          <br />
+          Agents will use{" "}
+          <Link
+            to={paths.workspace.settings.chatSettings(slug)}
+            className="underline"
+          >
+            the model set for the workspace
+          </Link>{" "}
+          or{" "}
+          <Link to={paths.settings.llmPreference()} className="underline">
+            the model set for the system.
+          </Link>
+        </p>
+      </div>
+    );
+  }
 
   if (loading) {
     return (
diff --git a/frontend/src/pages/WorkspaceSettings/ChatSettings/ChatModelSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/ChatSettings/WorkspaceLLMSelection/ChatModelSelection/index.jsx
similarity index 99%
rename from frontend/src/pages/WorkspaceSettings/ChatSettings/ChatModelSelection/index.jsx
rename to frontend/src/pages/WorkspaceSettings/ChatSettings/WorkspaceLLMSelection/ChatModelSelection/index.jsx
index 71d943e5ed253c6ffdb8ca6347ae8a0ee660c279..0ca6d0a49322a3e85c8af039ee07de6126560238 100644
--- a/frontend/src/pages/WorkspaceSettings/ChatSettings/ChatModelSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/ChatSettings/WorkspaceLLMSelection/ChatModelSelection/index.jsx
@@ -2,6 +2,7 @@ import useGetProviderModels, {
   DISABLED_PROVIDERS,
 } from "@/hooks/useGetProvidersModels";
 import { useTranslation } from "react-i18next";
+
 export default function ChatModelSelection({
   provider,
   workspace,
diff --git a/frontend/src/pages/WorkspaceSettings/ChatSettings/WorkspaceLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/ChatSettings/WorkspaceLLMSelection/index.jsx
index 7542f2d76c8133d1147b802a9f76e64e92c24e2f..436215a8382eb8ae4b08cd2101e8c0134b73ed6a 100644
--- a/frontend/src/pages/WorkspaceSettings/ChatSettings/WorkspaceLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/ChatSettings/WorkspaceLLMSelection/index.jsx
@@ -3,8 +3,10 @@ import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
 import WorkspaceLLMItem from "./WorkspaceLLMItem";
 import { AVAILABLE_LLM_PROVIDERS } from "@/pages/GeneralSettings/LLMPreference";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
-import ChatModelSelection from "../ChatModelSelection";
+import ChatModelSelection from "./ChatModelSelection";
 import { useTranslation } from "react-i18next";
+import { Link } from "react-router-dom";
+import paths from "@/utils/paths";
 
 // Some providers can only be associated with a single model.
 // In that case there is no selection to be made so we can just move on.
@@ -148,7 +150,23 @@ export default function WorkspaceLLMSelection({
           </button>
         )}
       </div>
-      {!NO_MODEL_SELECTION.includes(selectedLLM) && (
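+      {/* Providers without per-workspace model selection defer to the system LLM preference. */}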
+      {NO_MODEL_SELECTION.includes(selectedLLM) ? (
+        <>
+          {selectedLLM !== "default" && (
+            <div className="w-full h-10 justify-center items-center flex mt-4">
+              <p className="text-sm font-base text-white text-opacity-60 text-center">
+                Multi-model support is not available for this provider yet.
+                <br />
+                This workspace will use{" "}
+                <Link to={paths.settings.llmPreference()} className="underline">
+                  the model set for the system.
+                </Link>
+              </p>
+            </div>
+          )}
+        </>
+      ) : (
         <div className="mt-4 flex flex-col gap-y-1">
           <ChatModelSelection
             provider={selectedLLM}
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index cfa3d6a8ac2248ad553a7f100e7b2298ab2d7c7c..66d51b74dee13d4285711d93669f3b548cad5a5a 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -10,6 +10,12 @@ const { USER_AGENT, WORKSPACE_AGENT } = require("./defaults");
 class AgentHandler {
   #invocationUUID;
   #funcsToLoad = [];
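+  // Providers where multi-model loading is unsupported; values are the env keys
+  // holding the system model preference (null when the provider ignores `model`).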
+  #noProviderModelDefault = {
+    azure: "OPEN_MODEL_PREF",
+    lmstudio: "LMSTUDIO_MODEL_PREF",
+    textgenwebui: null, // does not use `model` in the API request
+    "generic-openai": "GENERIC_OPEN_AI_MODEL_PREF",
+  };
   invocation = null;
   aibitat = null;
   channel = null;
@@ -172,7 +178,8 @@
       case "mistral":
         return "mistral-medium";
       case "generic-openai":
-        return "gpt-3.5-turbo";
+        return null;
       case "perplexity":
         return "sonar-small-online";
       case "textgenwebui":
@@ -182,10 +188,31 @@
     }
   }
 
+  /**
+   * Finds or assumes the model preference value to use for API calls.
+   * If multi-model loading is supported, we use the workspace's agent model selection.
+   * If not supported, we attempt to fall back to the system-level model set for the
+   * LLM preference, and if that fails we assume a reasonable base model exists.
+   * @returns {string|null} the model preference value to use in API calls
+   */
+  #fetchModel() {
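+    // Provider supports multi-model loading, so use the workspace agent model when set.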
+    if (!Object.keys(this.#noProviderModelDefault).includes(this.provider))
+      return this.invocation.workspace.agentModel || this.#providerDefault();
+
+    // Provider has no reliable default (can't load many models), so we need to look at
+    // the system env for the model param.
+    const sysModelKey = this.#noProviderModelDefault[this.provider];
+    if (!!sysModelKey)
+      return process.env[sysModelKey] ?? this.#providerDefault();
+
+    // If all else fails - look at the provider default list
+    return this.#providerDefault();
+  }
+
   #providerSetupAndCheck() {
     this.provider = this.invocation.workspace.agentProvider || "openai";
-    this.model =
-      this.invocation.workspace.agentModel || this.#providerDefault();
+    this.model = this.#fetchModel();
     this.log(`Start ${this.#invocationUUID}::${this.provider}:${this.model}`);
     this.#checkSetup();
   }