From 1c3d0b19ecaf1a6c6428867f685f845bb9f7a81c Mon Sep 17 00:00:00 2001
From: Marcus Schiesser <mail@marcusschiesser.de>
Date: Wed, 10 Apr 2024 11:12:25 +0800
Subject: [PATCH] feat: Use `gpt-4-turbo` model as default. Upgrade Python
 llama-index to 0.10.28

---
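Notes (below the `---`, so not part of the commit message): `gpt-4-turbo` is
assumed here to be the vision-capable GA model, which is why the `multiModal`
check in chat-section.tsx is simply repointed at the new default name. If more
vision-capable models need to be recognized later, a minimal sketch of a more
general check could look like the following (the `VISION_MODELS` list and
helper shape are illustrative, not part of this patch):

    // Hypothetical generalization: enable multimodal input for any
    // vision-capable model instead of matching one hard-coded name.
    const VISION_MODELS = ["gpt-4-turbo", "gpt-4-vision-preview"];
    const multiModal = VISION_MODELS.includes(
      process.env.NEXT_PUBLIC_MODEL ?? "",
    );
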
 README.md                                                   | 2 +-
 questions.ts                                                | 2 +-
 templates/types/streaming/fastapi/pyproject.toml            | 6 +++---
 .../types/streaming/nextjs/app/components/chat-section.tsx  | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index e9228b3b..f0a06716 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ Optionally generate a frontend if you've selected the Python or Express back-end
 
 ## Customizing the AI models
 
-The app will default to OpenAI's `gpt-4-vision-preview` LLM and `text-embedding-3-large` embedding model.
+The app will default to OpenAI's `gpt-4-turbo` LLM and `text-embedding-3-large` embedding model.
 
 If you want to use different OpenAI models, add the `--ask-models` CLI parameter.
 
diff --git a/questions.ts b/questions.ts
index 6efbc45d..375ad807 100644
--- a/questions.ts
+++ b/questions.ts
@@ -75,7 +75,7 @@ const defaults: QuestionArgs = {
   openAiKey: "",
   llamaCloudKey: "",
   useLlamaParse: false,
-  model: "gpt-4-vision-preview",
+  model: "gpt-4-turbo",
   embeddingModel: "text-embedding-3-large",
   communityProjectConfig: undefined,
   llamapack: "",
diff --git a/templates/types/streaming/fastapi/pyproject.toml b/templates/types/streaming/fastapi/pyproject.toml
index 8c087f73..30cf267f 100644
--- a/templates/types/streaming/fastapi/pyproject.toml
+++ b/templates/types/streaming/fastapi/pyproject.toml
@@ -13,9 +13,9 @@ python = "^3.11,<3.12"
 fastapi = "^0.109.1"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
 python-dotenv = "^1.0.0"
-llama-index = "0.10.15"
-llama-index-core = "0.10.15"
-llama-index-agent-openai = "0.1.5"
+llama-index = "0.10.28"
+llama-index-core = "0.10.28"
+llama-index-agent-openai = "0.2.2"
 
 [build-system]
 requires = ["poetry-core"]
diff --git a/templates/types/streaming/nextjs/app/components/chat-section.tsx b/templates/types/streaming/nextjs/app/components/chat-section.tsx
index 08afc254..63f8adf5 100644
--- a/templates/types/streaming/nextjs/app/components/chat-section.tsx
+++ b/templates/types/streaming/nextjs/app/components/chat-section.tsx
@@ -39,7 +39,7 @@ export default function ChatSection() {
         handleSubmit={handleSubmit}
         handleInputChange={handleInputChange}
         isLoading={isLoading}
-        multiModal={process.env.NEXT_PUBLIC_MODEL === "gpt-4-vision-preview"}
+        multiModal={process.env.NEXT_PUBLIC_MODEL === "gpt-4-turbo"}
       />
     </div>
   );
-- 
GitLab