diff --git a/frontend/src/components/LLMSelection/GeminiLLMOptions/index.jsx b/frontend/src/components/LLMSelection/GeminiLLMOptions/index.jsx
index a46e5132933783035e590c4e2d47add21247271f..8cb513f312cf6e58bcd34b67daadffccda93760c 100644
--- a/frontend/src/components/LLMSelection/GeminiLLMOptions/index.jsx
+++ b/frontend/src/components/LLMSelection/GeminiLLMOptions/index.jsx
@@ -29,7 +29,7 @@ export default function GeminiLLMOptions({ settings }) {
               required={true}
               className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
             >
-              {["gemini-pro"].map((model) => {
+              {["gemini-pro", "gemini-1.5-pro-latest"].map((model) => {
                 return (
                   <option key={model} value={model}>
                     {model}
diff --git a/frontend/src/hooks/useGetProvidersModels.js b/frontend/src/hooks/useGetProvidersModels.js
index 5dc5cd2eda519e1acc40362c2d6f31f39c75c693..b6cb403e16a0607d875d8e2a1296731e9980da34 100644
--- a/frontend/src/hooks/useGetProvidersModels.js
+++ b/frontend/src/hooks/useGetProvidersModels.js
@@ -5,7 +5,7 @@ import { useEffect, useState } from "react";
 export const DISABLED_PROVIDERS = ["azure", "lmstudio", "native"];
 const PROVIDER_DEFAULT_MODELS = {
   openai: [],
-  gemini: ["gemini-pro"],
+  gemini: ["gemini-pro", "gemini-1.5-pro-latest"],
   anthropic: [
     "claude-instant-1.2",
     "claude-2.0",
diff --git a/server/package.json b/server/package.json
index d4587eb208a5b68301d0ed035ca956550177a0c5..752308c03de890a2650122362893dc52e1ef0b04 100644
--- a/server/package.json
+++ b/server/package.json
@@ -23,7 +23,7 @@
     "@anthropic-ai/sdk": "^0.16.1",
     "@azure/openai": "1.0.0-beta.10",
     "@datastax/astra-db-ts": "^0.1.3",
-    "@google/generative-ai": "^0.1.3",
+    "@google/generative-ai": "^0.7.1",
     "@googleapis/youtube": "^9.0.0",
     "@pinecone-database/pinecone": "^2.0.1",
     "@prisma/client": "5.3.1",
diff --git a/server/utils/AiProviders/gemini/index.js b/server/utils/AiProviders/gemini/index.js
index 35885a2c6c792f717e00788539d366e74804216e..354c1899e52f60b202d21480cf938442255aa42e 100644
--- a/server/utils/AiProviders/gemini/index.js
+++ b/server/utils/AiProviders/gemini/index.js
@@ -14,7 +14,13 @@ class GeminiLLM {
     const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
     this.model =
       modelPreference || process.env.GEMINI_LLM_MODEL_PREF || "gemini-pro";
-    this.gemini = genAI.getGenerativeModel({ model: this.model });
+    this.gemini = genAI.getGenerativeModel(
+      { model: this.model },
+      {
+        // gemini-1.5-pro-latest is only available on the v1beta API.
+        apiVersion: this.model === "gemini-1.5-pro-latest" ? "v1beta" : "v1",
+      }
+    );
     this.limits = {
       history: this.promptWindowLimit() * 0.15,
       system: this.promptWindowLimit() * 0.15,
@@ -49,13 +55,15 @@ class GeminiLLM {
     switch (this.model) {
       case "gemini-pro":
         return 30_720;
+      case "gemini-1.5-pro-latest":
+        return 1_048_576;
       default:
         return 30_720; // assume a gemini-pro model
     }
   }
 
   isValidChatCompletionModel(modelName = "") {
-    const validModels = ["gemini-pro"];
+    const validModels = ["gemini-pro", "gemini-1.5-pro-latest"];
     return validModels.includes(modelName);
   }
 
@@ -90,11 +98,11 @@ class GeminiLLM {
     const allMessages = messages
       .map((message) => {
         if (message.role === "system")
-          return { role: "user", parts: message.content };
+          return { role: "user", parts: [{ text: message.content }] };
         if (message.role === "user")
-          return { role: "user", parts: message.content };
+          return { role: "user", parts: [{ text: message.content }] };
         if (message.role === "assistant")
-          return { role: "model", parts: message.content };
+          return { role: "model", parts: [{ text: message.content }] };
         return null;
       })
       .filter((msg) => !!msg);
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 811911f3e4fdfa18d4937df415d0ace19dab4c9a..17a4172a4d11ad16d769423c709327c8e4dbc915 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -387,7 +387,7 @@ function supportedTranscriptionProvider(input = "") {
 }
 
 function validGeminiModel(input = "") {
-  const validModels = ["gemini-pro"];
+  const validModels = ["gemini-pro", "gemini-1.5-pro-latest"];
   return validModels.includes(input)
     ? null
     : `Invalid Model type. Must be one of ${validModels.join(", ")}.`;
diff --git a/server/yarn.lock b/server/yarn.lock
index 466ac156033c2181318c65b6c3b710371e4aafd9..8e9d1010c889731728dca2bfe033ed2b72476391 100644
--- a/server/yarn.lock
+++ b/server/yarn.lock
@@ -220,10 +220,10 @@
   resolved "https://registry.yarnpkg.com/@gar/promisify/-/promisify-1.1.3.tgz#555193ab2e3bb3b6adc3d551c9c030d9e860daf6"
   integrity sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw==
 
-"@google/generative-ai@^0.1.3":
-  version "0.1.3"
-  resolved "https://registry.yarnpkg.com/@google/generative-ai/-/generative-ai-0.1.3.tgz#8e529d4d86c85b64d297b4abf1a653d613a09a9f"
-  integrity sha512-Cm4uJX1sKarpm1mje/MiOIinM7zdUUrQp/5/qGPAgznbdd/B9zup5ehT6c1qGqycFcSopTA1J1HpqHS5kJR8hQ==
+"@google/generative-ai@^0.7.1":
+  version "0.7.1"
+  resolved "https://registry.yarnpkg.com/@google/generative-ai/-/generative-ai-0.7.1.tgz#eb187c75080c0706245699dbc06816c830d8c6a7"
+  integrity sha512-WTjMLLYL/xfA5BW6xAycRPiAX7FNHKAxrid/ayqC1QMam0KAK0NbMeS9Lubw80gVg5xFMLE+H7pw4wdNzTOlxw==
 
 "@googleapis/youtube@^9.0.0":
   version "9.0.0"