diff --git a/.vscode/settings.json b/.vscode/settings.json
index 459f57fc3f40903a2cf888014d4880bf647776e4..2e43b1926d4c82c400032c2ad500bedb45c34629 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,5 +1,6 @@
 {
   "cSpell.words": [
+    "Dockerized",
     "Ollama",
     "openai",
     "Qdrant",
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 01dde61c9685863c0b2f675c62b19d3cf54e42e0..72fea424a1236dd9266637a9dd9a80e9d3634665 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -83,6 +83,9 @@ RUN cd ./server && npx prisma migrate deploy --schema=./prisma/schema.prisma
 
 # Setup the environment
 ENV NODE_ENV=production
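+# Flag the runtime as Docker so URL validation can reject loopback addresses
+# that are unreachable from inside the container (see server/utils/helpers/updateENV.js)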
+ENV ANYTHING_LLM_RUNTIME=docker
 
 # Expose the server port
 EXPOSE 3001
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 11278f97f65eb8677c17ac4276f204d58aabca95..cf572ad0bbc4c097b7773a2f4317d5ac440fc5ab 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -56,7 +56,7 @@ const KEY_MAPPING = {
   // LMStudio Settings
   LMStudioBasePath: {
     envKey: "LMSTUDIO_BASE_PATH",
-    checks: [isNotEmpty, validLLMExternalBasePath],
+    checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
   },
   LMStudioTokenLimit: {
     envKey: "LMSTUDIO_MODEL_TOKEN_LIMIT",
@@ -66,7 +66,7 @@ const KEY_MAPPING = {
   // LocalAI Settings
   LocalAiBasePath: {
     envKey: "LOCAL_AI_BASE_PATH",
-    checks: [isNotEmpty, validLLMExternalBasePath],
+    checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
   },
   LocalAiModelPref: {
     envKey: "LOCAL_AI_MODEL_PREF",
@@ -83,7 +83,7 @@ const KEY_MAPPING = {
 
   OllamaLLMBasePath: {
     envKey: "OLLAMA_BASE_PATH",
-    checks: [isNotEmpty, validOllamaLLMBasePath],
+    checks: [isNotEmpty, validOllamaLLMBasePath, validDockerizedUrl],
   },
   OllamaLLMModelPref: {
     envKey: "OLLAMA_MODEL_PREF",
@@ -106,7 +106,7 @@ const KEY_MAPPING = {
   },
   EmbeddingBasePath: {
     envKey: "EMBEDDING_BASE_PATH",
-    checks: [isNotEmpty, validLLMExternalBasePath],
+    checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
   },
   EmbeddingModelPref: {
     envKey: "EMBEDDING_MODEL_PREF",
@@ -126,7 +126,7 @@ const KEY_MAPPING = {
   // Chroma Options
   ChromaEndpoint: {
     envKey: "CHROMA_ENDPOINT",
-    checks: [isValidURL, validChromaURL],
+    checks: [isValidURL, validChromaURL, validDockerizedUrl],
   },
   ChromaApiHeader: {
     envKey: "CHROMA_API_HEADER",
@@ -140,7 +140,7 @@ const KEY_MAPPING = {
   // Weaviate Options
   WeaviateEndpoint: {
     envKey: "WEAVIATE_ENDPOINT",
-    checks: [isValidURL],
+    checks: [isValidURL, validDockerizedUrl],
   },
   WeaviateApiKey: {
     envKey: "WEAVIATE_API_KEY",
@@ -150,7 +150,7 @@ const KEY_MAPPING = {
   // QDrant Options
   QdrantEndpoint: {
     envKey: "QDRANT_ENDPOINT",
-    checks: [isValidURL],
+    checks: [isValidURL, validDockerizedUrl],
   },
   QdrantApiKey: {
     envKey: "QDRANT_API_KEY",
@@ -318,6 +318,22 @@ function isDownloadedModel(input = "") {
   return files.includes(input);
 }
 
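+// Reject localhost/loopback URLs when AnythingLLM itself is running inside
+// Docker, since those addresses resolve to the container rather than the host
+// machine. Returns an error message string, or null when the URL is acceptable.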
+function validDockerizedUrl(input = "") {
+  if (process.env.ANYTHING_LLM_RUNTIME !== "docker") return null;
+  try {
+    const { hostname } = new URL(input);
+    if (["localhost", "127.0.0.1", "0.0.0.0"].includes(hostname.toLowerCase()))
+      return "Localhost, 127.0.0.1, or 0.0.0.0 origins cannot be reached from inside the AnythingLLM container. Please use host.docker.internal, a real machine ip, or domain to connect to your service.";
+    return null;
+  } catch {}
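+  // Malformed input falls through here; URL-format errors are reported by the
+  // other validators in each check chain (e.g., isValidURL).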
+  return null;
+}
+
 // This will force update .env variables which for any which reason were not able to be parsed or
 // read from an ENV file as this seems to be a complicating step for many so allowing people to write
 // to the process will at least alleviate that issue. It does not perform comprehensive validity checks or sanity checks