diff --git a/server/utils/AiProviders/gemini/index.js b/server/utils/AiProviders/gemini/index.js
index b9eb26c3c5deb7205a9495a881b4daf09da22814..962b120136e7ebf7ac37670b62c21a3616bd6444 100644
--- a/server/utils/AiProviders/gemini/index.js
+++ b/server/utils/AiProviders/gemini/index.js
@@ -114,6 +114,26 @@ class GeminiLLM {
       allMessages[allMessages.length - 1].role === "user"
     )
       allMessages.pop();
+
+    // Validate that every user message is followed by a model message.
+    // When compressing messages to retain as much context as possible, the
+    // strict user/model turn order the Gemini API expects can be broken,
+    // leaving two user prompts adjacent in the message array.
+    for (let i = 0; i < allMessages.length; i++) {
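+      // Only inspect messages that have a successor in the array; a
+      // trailing user message is handled by the pop() above.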
+      if (
+        allMessages[i].role === "user" &&
+        i < allMessages.length - 1 &&
+        allMessages[i + 1].role !== "model"
+      ) {
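+        // Insert a placeholder model reply so the turns strictly alternate.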
+        allMessages.splice(i + 1, 0, {
+          role: "model",
+          parts: [{ text: "Okay." }],
+        });
+      }
+    }
+
     return allMessages;
   }