diff --git a/.changeset/strong-dryers-punch.md b/.changeset/strong-dryers-punch.md
new file mode 100644
index 0000000000000000000000000000000000000000..728fdb30e540c4cc99693a42a782014b47423a05
--- /dev/null
+++ b/.changeset/strong-dryers-punch.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Use gpt-4o-mini as the default model
diff --git a/helpers/providers/openai.ts b/helpers/providers/openai.ts
index 667c14bed29dbd77e74cdacbd8b7211f66b4e323..2d3b213e0136952b452d15e9f2bbb7b3bfa335e4 100644
--- a/helpers/providers/openai.ts
+++ b/helpers/providers/openai.ts
@@ -8,7 +8,7 @@ import { questionHandlers } from "../../questions";
 
 const OPENAI_API_URL = "https://api.openai.com/v1";
 
-const DEFAULT_MODEL = "gpt-3.5-turbo";
+const DEFAULT_MODEL = "gpt-4o-mini";
 const DEFAULT_EMBEDDING_MODEL = "text-embedding-3-large";
 
 export async function askOpenAIQuestions({
diff --git a/templates/types/streaming/express/src/controllers/engine/settings.ts b/templates/types/streaming/express/src/controllers/engine/settings.ts
index 98160a56a5d5c95270344d0f7f1c0014bfd2b6fa..2207552ac211d97b5cb237f2c9429b5c044089c9 100644
--- a/templates/types/streaming/express/src/controllers/engine/settings.ts
+++ b/templates/types/streaming/express/src/controllers/engine/settings.ts
@@ -55,7 +55,7 @@ export const initSettings = async () => {
 
 function initOpenAI() {
   Settings.llm = new OpenAI({
-    model: process.env.MODEL ?? "gpt-3.5-turbo",
+    model: process.env.MODEL ?? "gpt-4o-mini",
     maxTokens: process.env.LLM_MAX_TOKENS
       ? Number(process.env.LLM_MAX_TOKENS)
       : undefined,