diff --git a/server/endpoints/api/workspaceThread/index.js b/server/endpoints/api/workspaceThread/index.js
index 0d6eb59c67a012065062ac3fd83cedd4ed32fb58..d85cf739b136e9109f1fb68810d47e65577a7a88 100644
--- a/server/endpoints/api/workspaceThread/index.js
+++ b/server/endpoints/api/workspaceThread/index.js
@@ -344,7 +344,14 @@ function apiWorkspaceThreadEndpoints(app) {
             example: {
               message: "What is AnythingLLM?",
               mode: "query | chat",
-              userId: 1
+              userId: 1,
+              attachments: [
+                {
+                  name: "image.png",
+                  mime: "image/png",
+                  contentString: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA..."
+                }
+              ]
             }
           }
         }
@@ -374,7 +381,12 @@ function apiWorkspaceThreadEndpoints(app) {
       */
       try {
         const { slug, threadSlug } = request.params;
-        const { message, mode = "query", userId } = reqBody(request);
+        const {
+          message,
+          mode = "query",
+          userId,
+          attachments = [],
+        } = reqBody(request);
         const workspace = await Workspace.get({ slug });
         const thread = await WorkspaceThread.get({
           slug: threadSlug,
@@ -414,6 +426,7 @@ function apiWorkspaceThreadEndpoints(app) {
           mode,
           user,
           thread,
+          attachments,
         });
         await Telemetry.sendTelemetry("sent_chat", {
           LLMSelection: process.env.LLM_PROVIDER || "openai",
@@ -469,7 +482,14 @@ function apiWorkspaceThreadEndpoints(app) {
             example: {
               message: "What is AnythingLLM?",
               mode: "query | chat",
-              userId: 1
+              userId: 1,
+              attachments: [
+                {
+                  name: "image.png",
+                  mime: "image/png",
+                  contentString: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA..."
+                }
+              ]
             }
           }
         }
@@ -520,7 +540,12 @@ function apiWorkspaceThreadEndpoints(app) {
       */
       try {
         const { slug, threadSlug } = request.params;
-        const { message, mode = "query", userId } = reqBody(request);
+        const {
+          message,
+          mode = "query",
+          userId,
+          attachments = [],
+        } = reqBody(request);
         const workspace = await Workspace.get({ slug });
         const thread = await WorkspaceThread.get({
           slug: threadSlug,
@@ -568,6 +593,7 @@ function apiWorkspaceThreadEndpoints(app) {
           mode,
           user,
           thread,
+          attachments,
         });
         await Telemetry.sendTelemetry("sent_chat", {
           LLMSelection: process.env.LLM_PROVIDER || "openai",
diff --git a/server/swagger/openapi.json b/server/swagger/openapi.json
index 19d14766ce63b2b52537f4b7892f5fe043836659..46a63809a5e08a1aa4c7b4b3c05a522f2180709a 100644
--- a/server/swagger/openapi.json
+++ b/server/swagger/openapi.json
@@ -2902,7 +2902,14 @@
               "example": {
                 "message": "What is AnythingLLM?",
                 "mode": "query | chat",
-                "userId": 1
+                "userId": 1,
+                "attachments": [
+                  {
+                    "name": "image.png",
+                    "mime": "image/png",
+                    "contentString": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA..."
+                  }
+                ]
               }
             }
           }
@@ -3007,7 +3014,14 @@
               "example": {
                 "message": "What is AnythingLLM?",
                 "mode": "query | chat",
-                "userId": 1
+                "userId": 1,
+                "attachments": [
+                  {
+                    "name": "image.png",
+                    "mime": "image/png",
+                    "contentString": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA..."
+                  }
+                ]
               }
             }
           }