diff --git a/README.md b/README.md
index e9228b3baef77d4a7a78819c587c4fb97d65c58d..f0a067167ec032e9057c0d428da46408d162b423 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ Optionally generate a frontend if you've selected the Python or Express back-end
 
 ## Customizing the AI models
 
-The app will default to OpenAI's `gpt-4-vision-preview` LLM and `text-embedding-3-large` embedding model.
+The app will default to OpenAI's `gpt-4-turbo` LLM and `text-embedding-3-large` embedding model.
 
 If you want to use different OpenAI models, add the `--ask-models` CLI parameter.
 
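The README hunk above swaps the documented default LLM. As a hedged illustration of what "default" means at runtime, the sketch below resolves a model name from the environment and falls back to `gpt-4-turbo`; the `MODEL` variable name and the `resolveModel` helper are assumptions for illustration, not identifiers from this repository:

```ts
// Illustrative sketch only: fall back to the new default when no model is
// configured. `MODEL` and `resolveModel` are hypothetical names, not code
// taken from this repo.
const DEFAULT_MODEL = "gpt-4-turbo";

function resolveModel(env: NodeJS.ProcessEnv = process.env): string {
  return env.MODEL ?? DEFAULT_MODEL;
}
```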
diff --git a/questions.ts b/questions.ts
index 6efbc45d55add0ee441b9c29a822ad4d4cb2c02c..375ad807822a7144deec0e09d26987bdf9254521 100644
--- a/questions.ts
+++ b/questions.ts
@@ -75,7 +75,7 @@ const defaults: QuestionArgs = {
   openAiKey: "",
   llamaCloudKey: "",
   useLlamaParse: false,
-  model: "gpt-4-vision-preview",
+  model: "gpt-4-turbo",
   embeddingModel: "text-embedding-3-large",
   communityProjectConfig: undefined,
   llamapack: "",
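In `questions.ts`, `model` is one field of the `defaults` object, so the new value only applies when the user does not override it (e.g. via `--ask-models`). A minimal sketch of that precedence, assuming prompt answers are spread over the defaults; the `answers` name, the trimmed-down type, and the merge shape are assumptions, not the file's actual logic:

```ts
// Hypothetical illustration of defaults vs. user-supplied answers; only the
// two model fields from the diff are shown.
type ModelConfig = { model: string; embeddingModel: string };

const defaults: ModelConfig = {
  model: "gpt-4-turbo",
  embeddingModel: "text-embedding-3-large",
};

// Answers gathered when the user is prompted for models; empty otherwise.
const answers: Partial<ModelConfig> = {};

// Later fields win, so a prompted model overrides the default.
const config: ModelConfig = { ...defaults, ...answers };
```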
diff --git a/templates/types/streaming/fastapi/pyproject.toml b/templates/types/streaming/fastapi/pyproject.toml
index 8c087f734ea6ddd49644c4e3f77b0f9ee2c18de5..30cf267f1863e6c4832288b79f64927fc6060ec9 100644
--- a/templates/types/streaming/fastapi/pyproject.toml
+++ b/templates/types/streaming/fastapi/pyproject.toml
@@ -13,9 +13,9 @@ python = "^3.11,<3.12"
 fastapi = "^0.109.1"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
 python-dotenv = "^1.0.0"
-llama-index = "0.10.15"
-llama-index-core = "0.10.15"
-llama-index-agent-openai = "0.1.5"
+llama-index = "0.10.28"
+llama-index-core = "0.10.28"
+llama-index-agent-openai = "0.2.2"
 
 [build-system]
 requires = ["poetry-core"]
diff --git a/templates/types/streaming/nextjs/app/components/chat-section.tsx b/templates/types/streaming/nextjs/app/components/chat-section.tsx
index 08afc25487999ad9809aa01c9f95c726c39db374..63f8adf5701ae0aebe02d2911a3a09ff3d05519e 100644
--- a/templates/types/streaming/nextjs/app/components/chat-section.tsx
+++ b/templates/types/streaming/nextjs/app/components/chat-section.tsx
@@ -39,7 +39,7 @@ export default function ChatSection() {
         handleSubmit={handleSubmit}
         handleInputChange={handleInputChange}
         isLoading={isLoading}
-        multiModal={process.env.NEXT_PUBLIC_MODEL === "gpt-4-vision-preview"}
+        multiModal={process.env.NEXT_PUBLIC_MODEL === "gpt-4-turbo"}
       />
     </div>
   );
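The `multiModal` prop is a strict string comparison against `NEXT_PUBLIC_MODEL`, so image input is enabled only for apps generated with exactly `gpt-4-turbo`, which accepts image input, unlike the text-only GPT-4 variants. Below is a rough sketch of how a consumer of the flag might behave; this is not the template's actual `ChatInput` component, and the JSX is an assumption for illustration:

```tsx
// Assumed consumer of the multiModal flag: render a file picker for images
// only when the configured model can accept them. Illustrative only.
function ChatInput({ multiModal }: { multiModal: boolean }) {
  return (
    <form>
      <input type="text" name="message" />
      {multiModal && <input type="file" accept="image/*" />}
    </form>
  );
}
```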