diff --git a/.vscode/settings.json b/.vscode/settings.json
index 9f6017380e7cfe41a6a08e612b92a9226bb26e7a..329335c14b343b0a75cd8c6946ac2dc74f1f5e9c 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -5,5 +5,8 @@
   "[xml]": {
     "editor.defaultFormatter": "redhat.vscode-xml"
   },
-  "jest.rootPath": "./packages/core"
-}
\ No newline at end of file
+  "jest.rootPath": "./packages/core",
+  "[python]": {
+    "editor.defaultFormatter": "ms-python.black-formatter"
+  }
+}
diff --git a/packages/create-llama/.npmignore b/packages/create-llama/.npmignore
deleted file mode 100644
index 8b89f426d787e05f72258f672adbad907d8217d1..0000000000000000000000000000000000000000
--- a/packages/create-llama/.npmignore
+++ /dev/null
@@ -1,2 +0,0 @@
-**/__pycache__/
-**/poetry.lock
\ No newline at end of file
diff --git a/packages/create-llama/e2e/basic.spec.ts b/packages/create-llama/e2e/basic.spec.ts
index 1a0da5af6da497bfa6d27cfd2e9ace3ba5c14bf1..861f5416c1ff4b34e7871f85104f989c69530d81 100644
--- a/packages/create-llama/e2e/basic.spec.ts
+++ b/packages/create-llama/e2e/basic.spec.ts
@@ -1,6 +1,8 @@
 /* eslint-disable turbo/no-undeclared-env-vars */
 import { expect, test } from "@playwright/test";
 import { ChildProcess } from "child_process";
+import fs from "fs";
+import path from "path";
 import type {
   TemplateEngine,
   TemplateFramework,
@@ -10,7 +12,11 @@ import type {
 import { createTestDir, runApp, runCreateLlama, type AppType } from "./utils";
 
 const templateTypes: TemplateType[] = ["streaming", "simple"];
-const templateFrameworks: TemplateFramework[] = ["nextjs", "express"];
+const templateFrameworks: TemplateFramework[] = [
+  "nextjs",
+  "express",
+  "fastapi",
+];
 const templateEngines: TemplateEngine[] = ["simple", "context"];
 const templateUIs: TemplateUI[] = ["shadcn", "html"];
 
@@ -32,12 +38,16 @@ for (const templateType of templateTypes) {
               ? "--no-frontend" // simple templates don't have frontends
               : "--frontend"
             : "";
+        if (appType === "--no-frontend" && templateUI !== "html") {
+          // if there's no frontend, don't iterate over UIs
+          continue;
+        }
         test.describe(`try create-llama ${templateType} ${templateFramework} ${templateEngine} ${templateUI} ${appType}`, async () => {
           let port: number;
           let externalPort: number;
           let cwd: string;
           let name: string;
-          let cps: ChildProcess[];
+          let cps: ChildProcess[] = [];
 
           test.beforeAll(async () => {
             port = Math.floor(Math.random() * 10000) + 10000;
@@ -54,11 +64,20 @@ for (const templateType of templateTypes) {
               externalPort,
             );
 
-            cps = await runApp(cwd, name, appType, port, externalPort);
+            if (templateFramework !== "fastapi") {
+              // don't run the app for fastapi for now (adds a Python dependency)
+              cps = await runApp(cwd, name, appType, port, externalPort);
+            }
           });
 
+          test("App folder should exist", async () => {
+            const dirExists = fs.existsSync(path.join(cwd, name));
+            expect(dirExists).toBeTruthy();
+          });
           test("Frontend should have a title", async ({ page }) => {
-            test.skip(appType === "--no-frontend");
+            test.skip(
+              appType === "--no-frontend" || templateFramework === "fastapi",
+            );
             await page.goto(`http://localhost:${port}`);
             await expect(page.getByText("Built by LlamaIndex")).toBeVisible();
           });
@@ -66,7 +85,9 @@ for (const templateType of templateTypes) {
           test("Frontend should be able to submit a message and receive a response", async ({
             page,
           }) => {
-            test.skip(appType === "--no-frontend");
+            test.skip(
+              appType === "--no-frontend" || templateFramework === "fastapi",
+            );
             await page.goto(`http://localhost:${port}`);
             await page.fill("form input", "hello");
             await page.click("form button[type=submit]");
@@ -86,7 +107,9 @@ for (const templateType of templateTypes) {
           test("Backend should response when calling API", async ({
             request,
           }) => {
-            test.skip(appType !== "--no-frontend");
+            test.skip(
+              appType !== "--no-frontend" || templateFramework === "fastapi",
+            );
             const response = await request.post(
               `http://localhost:${port}/api/chat`,
               {
diff --git a/packages/create-llama/index.ts b/packages/create-llama/index.ts
index 84396fa54722696995278a4594588822092a288d..ebdcd1ee26cabad35f55fe8c5389a24d5cb04bc3 100644
--- a/packages/create-llama/index.ts
+++ b/packages/create-llama/index.ts
@@ -121,6 +121,12 @@ Select external port.
   )
   .allowUnknownOption()
   .parse(process.argv);
+if (process.argv.includes("--no-frontend")) {
+  program.frontend = false;
+}
+if (process.argv.includes("--no-eslint")) {
+  program.eslint = false;
+}
 
 const packageManager = !!program.useNpm
   ? "npm"
diff --git a/packages/create-llama/package.json b/packages/create-llama/package.json
index 057f2e0c82fc10d33c5736f455fd46c6f713ffca..efcde345630494fa3472842133e233bcec0d1d2b 100644
--- a/packages/create-llama/package.json
+++ b/packages/create-llama/package.json
@@ -20,10 +20,11 @@
     "dist"
   ],
   "scripts": {
+    "clean": "rimraf --glob ./dist ./templates/**/__pycache__ ./templates/**/node_modules ./templates/**/poetry.lock",
     "dev": "ncc build ./index.ts -w -o dist/",
-    "build": "ncc build ./index.ts -o ./dist/ --minify --no-cache --no-source-map-register",
+    "build": "npm run clean && ncc build ./index.ts -o ./dist/ --minify --no-cache --no-source-map-register",
     "lint": "eslint . --ignore-pattern dist",
-    "e2e": "playwright test --reporter=list",
+    "e2e": "playwright test",
     "prepublishOnly": "cd ../../ && turbo run build"
   },
   "devDependencies": {
@@ -46,6 +47,7 @@
     "got": "10.7.0",
     "picocolors": "1.0.0",
     "prompts": "2.1.0",
+    "rimraf": "^5.0.5",
     "tar": "6.1.15",
     "terminal-link": "^3.0.0",
     "update-check": "1.5.4",
diff --git a/packages/create-llama/questions.ts b/packages/create-llama/questions.ts
index 109ac9fd8a27a4315949306041620e5f025d6b27..d37096011ff94eb97615f4e5d761ce537c836aac 100644
--- a/packages/create-llama/questions.ts
+++ b/packages/create-llama/questions.ts
@@ -89,14 +89,8 @@ export const askQuestions = async (
         })),
         initial: 0,
       },
-      {
-        onCancel: () => {
-          console.error("Exiting.");
-          process.exit(1);
-        },
-      },
+      handlers,
     );
-
     program.communityProjectPath = communityProjectPath;
     preferences.communityProjectPath = communityProjectPath;
     return; // early return - no further questions needed for community projects
@@ -130,11 +124,12 @@ export const askQuestions = async (
     }
   }
 
-  if (program.framework === "express" || program.framework === "fastapi") {
-    if (process.argv.includes("--no-frontend")) {
-      program.frontend = false;
-    }
+  if (
+    program.template === "streaming" &&
+    (program.framework === "express" || program.framework === "fastapi")
+  ) {
     // if a backend-only framework is selected, ask whether we should create a frontend
+    // (only for streaming backends)
     if (program.frontend === undefined) {
       if (ciInfo.isCI) {
         program.frontend = getPrefOrDefault("frontend");
@@ -161,7 +156,6 @@ export const askQuestions = async (
       }
     }
   } else {
-    // single project if framework is nextjs
     program.frontend = false;
   }
 
@@ -189,63 +183,64 @@ export const askQuestions = async (
     }
   }
 
-  if (program.framework === "express" || program.framework === "nextjs") {
-    if (!program.model) {
-      if (ciInfo.isCI) {
-        program.model = getPrefOrDefault("model");
-      } else {
-        const { model } = await prompts(
-          {
-            type: "select",
-            name: "model",
-            message: "Which model would you like to use?",
-            choices: [
-              { title: "gpt-3.5-turbo", value: "gpt-3.5-turbo" },
-              { title: "gpt-4", value: "gpt-4" },
-              { title: "gpt-4-1106-preview", value: "gpt-4-1106-preview" },
-              {
-                title: "gpt-4-vision-preview",
-                value: "gpt-4-vision-preview",
-              },
-            ],
-            initial: 0,
-          },
-          handlers,
-        );
-        program.model = model;
-        preferences.model = model;
-      }
+  if (!program.model) {
+    if (ciInfo.isCI) {
+      program.model = getPrefOrDefault("model");
+    } else {
+      const { model } = await prompts(
+        {
+          type: "select",
+          name: "model",
+          message: "Which model would you like to use?",
+          choices: [
+            { title: "gpt-3.5-turbo", value: "gpt-3.5-turbo" },
+            { title: "gpt-4", value: "gpt-4" },
+            { title: "gpt-4-1106-preview", value: "gpt-4-1106-preview" },
+            {
+              title: "gpt-4-vision-preview",
+              value: "gpt-4-vision-preview",
+            },
+          ],
+          initial: 0,
+        },
+        handlers,
+      );
+      program.model = model;
+      preferences.model = model;
     }
   }
 
-  if (program.framework === "express" || program.framework === "nextjs") {
-    if (!program.engine) {
-      if (ciInfo.isCI) {
-        program.engine = getPrefOrDefault("engine");
-      } else {
-        const { engine } = await prompts(
-          {
-            type: "select",
-            name: "engine",
-            message: "Which data source would you like to use?",
-            choices: [
-              {
-                title: "No data, just a simple chat",
-                value: "simple",
-              },
-              { title: "Use an example PDF", value: "context" },
-            ],
-            initial: 1,
-          },
-          handlers,
-        );
-        program.engine = engine;
-        preferences.engine = engine;
-      }
+  if (!program.engine) {
+    if (ciInfo.isCI) {
+      program.engine = getPrefOrDefault("engine");
+    } else {
+      const { engine } = await prompts(
+        {
+          type: "select",
+          name: "engine",
+          message: "Which data source would you like to use?",
+          choices: [
+            {
+              title: "No data, just a simple chat",
+              value: "simple",
+            },
+            { title: "Use an example PDF", value: "context" },
+          ],
+          initial: 1,
+        },
+        handlers,
+      );
+      program.engine = engine;
+      preferences.engine = engine;
     }
-    if (program.engine !== "simple" && !program.vectorDb) {
-      if (ciInfo.isCI) {
-        program.vectorDb = getPrefOrDefault("vectorDb");
+  }
+
+  if (program.engine !== "simple" && !program.vectorDb) {
+    if (ciInfo.isCI) {
+      program.vectorDb = getPrefOrDefault("vectorDb");
+    } else {
+      if (program.framework === "fastapi") {
+        program.vectorDb = "none";
       } else {
         const { vectorDb } = await prompts(
           {
@@ -282,11 +277,7 @@ export const askQuestions = async (
     preferences.openAiKey = key;
   }
 
-  if (
-    program.framework !== "fastapi" &&
-    !process.argv.includes("--eslint") &&
-    !process.argv.includes("--no-eslint")
-  ) {
+  if (program.framework !== "fastapi" && program.eslint === undefined) {
     if (ciInfo.isCI) {
       program.eslint = getPrefOrDefault("eslint");
     } else {
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/utils/__init__.py b/packages/create-llama/templates/components/vectordbs/python/none/__init__.py
similarity index 100%
rename from packages/create-llama/templates/types/simple/fastapi/app/utils/__init__.py
rename to packages/create-llama/templates/components/vectordbs/python/none/__init__.py
diff --git a/packages/create-llama/templates/components/vectordbs/python/none/constants.py b/packages/create-llama/templates/components/vectordbs/python/none/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..4180edc4b440cafc26aef00530dc3ba2af3cbdf6
--- /dev/null
+++ b/packages/create-llama/templates/components/vectordbs/python/none/constants.py
@@ -0,0 +1,4 @@
+STORAGE_DIR = "storage"  # directory to cache the generated index
+DATA_DIR = "data"  # directory containing the documents to index
+CHUNK_SIZE = 1024
+CHUNK_OVERLAP = 20
diff --git a/packages/create-llama/templates/components/vectordbs/python/none/context.py b/packages/create-llama/templates/components/vectordbs/python/none/context.py
new file mode 100644
index 0000000000000000000000000000000000000000..ceb8a50ae0cd02425aeb5fa4436df374590a4fad
--- /dev/null
+++ b/packages/create-llama/templates/components/vectordbs/python/none/context.py
@@ -0,0 +1,14 @@
+from llama_index import ServiceContext
+
+from app.context import create_base_context
+from app.engine.constants import CHUNK_SIZE, CHUNK_OVERLAP
+
+
+def create_service_context():
+    base = create_base_context()
+    return ServiceContext.from_defaults(
+        llm=base.llm,
+        embed_model=base.embed_model,
+        chunk_size=CHUNK_SIZE,
+        chunk_overlap=CHUNK_OVERLAP,
+    )
diff --git a/packages/create-llama/templates/components/vectordbs/python/none/generate.py b/packages/create-llama/templates/components/vectordbs/python/none/generate.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c4cd6a9e310f3f2e2f7e4709e94b9073282f151
--- /dev/null
+++ b/packages/create-llama/templates/components/vectordbs/python/none/generate.py
@@ -0,0 +1,31 @@
+import logging
+
+from dotenv import load_dotenv
+
+from app.engine.constants import DATA_DIR, STORAGE_DIR
+from app.engine.context import create_service_context
+
+load_dotenv()
+
+from llama_index import (
+    SimpleDirectoryReader,
+    VectorStoreIndex,
+)
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger()
+
+
+def generate_datasource(service_context):
+    logger.info("Creating new index")
+    # load the documents and create the index
+    documents = SimpleDirectoryReader(DATA_DIR).load_data()
+    index = VectorStoreIndex.from_documents(documents, service_context=service_context)
+    # store it for later
+    index.storage_context.persist(STORAGE_DIR)
+    logger.info(f"Finished creating new index. Stored in {STORAGE_DIR}")
+
+
+if __name__ == "__main__":
+    service_context = create_service_context()
+    generate_datasource(service_context)
diff --git a/packages/create-llama/templates/components/vectordbs/python/none/index.py b/packages/create-llama/templates/components/vectordbs/python/none/index.py
new file mode 100644
index 0000000000000000000000000000000000000000..0170d6e83b1acbbc8c49475fcc4e3e376554db94
--- /dev/null
+++ b/packages/create-llama/templates/components/vectordbs/python/none/index.py
@@ -0,0 +1,25 @@
+import logging
+import os
+from llama_index import (
+    StorageContext,
+    load_index_from_storage,
+)
+
+from app.engine.constants import STORAGE_DIR
+from app.engine.context import create_service_context
+
+
+def get_chat_engine():
+    service_context = create_service_context()
+    # check if storage already exists
+    if not os.path.exists(STORAGE_DIR):
+        raise Exception(
+            "StorageContext is empty - call 'python app/engine/generate.py' to generate the storage first"
+        )
+    logger = logging.getLogger("uvicorn")
+    # load the existing index
+    logger.info(f"Loading index from {STORAGE_DIR}...")
+    storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
+    index = load_index_from_storage(storage_context, service_context=service_context)
+    logger.info(f"Finished loading index from {STORAGE_DIR}")
+    return index.as_chat_engine()
diff --git a/packages/create-llama/templates/components/vectordbs/mongo/generate.mjs b/packages/create-llama/templates/components/vectordbs/typescript/mongo/generate.mjs
similarity index 100%
rename from packages/create-llama/templates/components/vectordbs/mongo/generate.mjs
rename to packages/create-llama/templates/components/vectordbs/typescript/mongo/generate.mjs
diff --git a/packages/create-llama/templates/components/vectordbs/mongo/index.ts b/packages/create-llama/templates/components/vectordbs/typescript/mongo/index.ts
similarity index 100%
rename from packages/create-llama/templates/components/vectordbs/mongo/index.ts
rename to packages/create-llama/templates/components/vectordbs/typescript/mongo/index.ts
diff --git a/packages/create-llama/templates/components/vectordbs/mongo/shared.mjs b/packages/create-llama/templates/components/vectordbs/typescript/mongo/shared.mjs
similarity index 100%
rename from packages/create-llama/templates/components/vectordbs/mongo/shared.mjs
rename to packages/create-llama/templates/components/vectordbs/typescript/mongo/shared.mjs
diff --git a/packages/create-llama/templates/components/vectordbs/none/constants.mjs b/packages/create-llama/templates/components/vectordbs/typescript/none/constants.mjs
similarity index 100%
rename from packages/create-llama/templates/components/vectordbs/none/constants.mjs
rename to packages/create-llama/templates/components/vectordbs/typescript/none/constants.mjs
diff --git a/packages/create-llama/templates/components/vectordbs/none/generate.mjs b/packages/create-llama/templates/components/vectordbs/typescript/none/generate.mjs
similarity index 100%
rename from packages/create-llama/templates/components/vectordbs/none/generate.mjs
rename to packages/create-llama/templates/components/vectordbs/typescript/none/generate.mjs
diff --git a/packages/create-llama/templates/components/vectordbs/none/index.ts b/packages/create-llama/templates/components/vectordbs/typescript/none/index.ts
similarity index 100%
rename from packages/create-llama/templates/components/vectordbs/none/index.ts
rename to packages/create-llama/templates/components/vectordbs/typescript/none/index.ts
diff --git a/packages/create-llama/templates/index.ts b/packages/create-llama/templates/index.ts
index 8392a7b07aa3aed2f30081400b4c64e544dc2c73..4924e1ccc20e6e08cb290f843985c48192305c70 100644
--- a/packages/create-llama/templates/index.ts
+++ b/packages/create-llama/templates/index.ts
@@ -19,17 +19,28 @@ import {
 
 const createEnvLocalFile = async (
   root: string,
-  openAiKey?: string,
-  vectorDb?: TemplateVectorDB,
+  opts?: {
+    openAiKey?: string;
+    vectorDb?: TemplateVectorDB;
+    model?: string;
+    framework?: TemplateFramework;
+  },
 ) => {
   const envFileName = ".env";
   let content = "";
 
-  if (openAiKey) {
-    content += `OPENAI_API_KEY=${openAiKey}\n`;
+  const model = opts?.model || "gpt-3.5-turbo";
+  content += `MODEL=${model}\n`;
+  if (opts?.framework === "nextjs") {
+    content += `NEXT_PUBLIC_MODEL=${model}\n`;
+  }
+  console.log("\nUsing OpenAI model: ", model, "\n");
+
+  if (opts?.openAiKey) {
+    content += `OPENAI_API_KEY=${opts?.openAiKey}\n`;
   }
 
-  switch (vectorDb) {
+  switch (opts?.vectorDb) {
     case "mongo": {
       content += `MONGODB_URI=\n`;
       content += `MONGODB_DATABASE=\n`;
@@ -53,7 +64,7 @@ const copyTestData = async (
   openAiKey?: string,
   vectorDb?: TemplateVectorDB,
 ) => {
-  if (engine === "context" || framework === "fastapi") {
+  if (engine === "context") {
     const srcPath = path.join(__dirname, "components", "data");
     const destPath = path.join(root, "data");
     console.log(`\nCopying test data to ${cyan(destPath)}\n`);
@@ -64,29 +75,29 @@ const copyTestData = async (
   }
 
   if (packageManager && engine === "context") {
+    const runGenerate = `${cyan(
+      framework === "fastapi"
+        ? "python app/engine/generate.py"
+        : `${packageManager} run generate`,
+    )}`;
     const hasOpenAiKey = openAiKey || process.env["OPENAI_API_KEY"];
     const hasVectorDb = vectorDb && vectorDb !== "none";
-    const shouldRunGenerateAfterInstall = hasOpenAiKey && vectorDb === "none";
+    const shouldRunGenerateAfterInstall =
+      hasOpenAiKey && framework !== "fastapi" && vectorDb === "none";
     if (shouldRunGenerateAfterInstall) {
-      console.log(
-        `\nRunning ${cyan(
-          `${packageManager} run generate`,
-        )} to generate the context data.\n`,
-      );
+      console.log(`\nRunning ${runGenerate} to generate the context data.\n`);
       await callPackageManager(packageManager, true, ["run", "generate"]);
-      return console.log();
+      console.log();
+      return;
     }
 
     const settings = [];
     if (!hasOpenAiKey) settings.push("your OpenAI key");
     if (hasVectorDb) settings.push("your Vector DB environment variables");
-    const generateMessage = `run ${cyan(
-      `${packageManager} run generate`,
-    )} to generate the context data.\n`;
-    const message = settings.length
-      ? `After setting ${settings.join(" and ")}, ${generateMessage}`
-      : generateMessage;
-    console.log(`\n${message}\n`);
+    const settingsMessage =
+      settings.length > 0 ? `After setting ${settings.join(" and ")}, ` : "";
+    const generateMessage = `run ${runGenerate} to generate the context data.`;
+    console.log(`\n${settingsMessage}${generateMessage}\n\n`);
   }
 };
 
@@ -176,7 +187,12 @@ const installTSTemplate = async ({
       vectorDBFolder = vectorDb;
     }
 
-    const VectorDBPath = path.join(compPath, "vectordbs", vectorDBFolder);
+    const VectorDBPath = path.join(
+      compPath,
+      "vectordbs",
+      "typescript",
+      vectorDBFolder,
+    );
     relativeEngineDestPath =
       framework === "nextjs"
         ? path.join("app", "api", "chat")
@@ -204,14 +220,6 @@ const installTSTemplate = async ({
     });
   }
 
-  if (framework === "nextjs" || framework === "express") {
-    await fs.writeFile(
-      path.join(root, "constants.ts"),
-      `export const MODEL = "${model || "gpt-3.5-turbo"}";\n`,
-    );
-    console.log("\nUsing OpenAI model: ", model || "gpt-3.5-turbo", "\n");
-  }
-
   /**
    * Update the package.json scripts.
    */
@@ -308,7 +316,8 @@ const installPythonTemplate = async ({
   root,
   template,
   framework,
-}: Pick<InstallTemplateArgs, "root" | "framework" | "template">) => {
+  engine,
+}: Pick<InstallTemplateArgs, "root" | "framework" | "template" | "engine">) => {
   console.log("\nInitializing Python project with template:", template, "\n");
   const templatePath = path.join(__dirname, "types", template, framework);
   await copy("**", root, {
@@ -331,6 +340,15 @@ const installPythonTemplate = async ({
     },
   });
 
+  if (engine === "context") {
+    const compPath = path.join(__dirname, "components");
+    const VectorDBPath = path.join(compPath, "vectordbs", "python", "none");
+    await copy("**", path.join(root, "app", "engine"), {
+      parents: true,
+      cwd: VectorDBPath,
+    });
+  }
+
   console.log(
     "\nPython project, dependencies won't be installed automatically.\n",
   );
@@ -369,7 +387,12 @@ export const installTemplate = async (
     // This is a backend, so we need to copy the test data and create the env file.
 
     // Copy the environment file to the target directory.
-    await createEnvLocalFile(props.root, props.openAiKey, props.vectorDb);
+    await createEnvLocalFile(props.root, {
+      openAiKey: props.openAiKey,
+      vectorDb: props.vectorDb,
+      model: props.model,
+      framework: props.framework,
+    });
 
     // Copy test pdf file
     await copyTestData(
diff --git a/packages/create-llama/templates/types.ts b/packages/create-llama/templates/types.ts
index 6fc7b47c1e33cd2e56e21ad2c423258aa4904f8b..a0567e10e4e3158cb6d32735bcf45fcb55c6631d 100644
--- a/packages/create-llama/templates/types.ts
+++ b/packages/create-llama/templates/types.ts
@@ -13,7 +13,7 @@ export interface InstallTemplateArgs {
   isOnline: boolean;
   template: TemplateType;
   framework: TemplateFramework;
-  engine?: TemplateEngine;
+  engine: TemplateEngine;
   ui: TemplateUI;
   eslint: boolean;
   customApiPath?: string;
diff --git a/packages/create-llama/templates/types/simple/express/constants.ts b/packages/create-llama/templates/types/simple/express/constants.ts
deleted file mode 100644
index 908949251c708651be6d0ae6b09c2982e65b0a1a..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/express/constants.ts
+++ /dev/null
@@ -1 +0,0 @@
-export const MODEL = "gpt-3.5-turbo";
diff --git a/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts b/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
index 8aa08613f4601f2b142c065028a22e9ef2d4ea74..46dfa56842916908cc3b6df184546d791447f03e 100644
--- a/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
+++ b/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
@@ -1,6 +1,5 @@
 import { Request, Response } from "express";
 import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
-import { MODEL } from "../../constants";
 import { createChatEngine } from "./engine";
 
 const getLastMessageContent = (
@@ -34,7 +33,7 @@ export const chat = async (req: Request, res: Response) => {
     }
 
     const llm = new OpenAI({
-      model: MODEL,
+      model: process.env.MODEL || "gpt-3.5-turbo",
     });
 
     const lastMessageContent = getLastMessageContent(
diff --git a/packages/create-llama/templates/types/simple/fastapi/README-template.md b/packages/create-llama/templates/types/simple/fastapi/README-template.md
index f0bfa5e089a960e2327b6ccdd4548948952b36f3..38f3c4a3fa5715639752a987051ff7a2879aa288 100644
--- a/packages/create-llama/templates/types/simple/fastapi/README-template.md
+++ b/packages/create-llama/templates/types/simple/fastapi/README-template.md
@@ -17,7 +17,13 @@ Example `backend/.env` file:
 OPENAI_API_KEY=<openai_api_key>
 ```
 
-Second, run the development server:
+Second, generate the embeddings of the documents in the `./data` directory (if this folder exists; otherwise, skip this step):
+
+```
+python app/engine/generate.py
+```
+
+Third, run the development server:
 
 ```
 python main.py
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/api/routers/chat.py b/packages/create-llama/templates/types/simple/fastapi/app/api/routers/chat.py
index 81f602edbeae66c5850b30a6183c009ab4b1e014..f6a246386a01579da33f13235685cfa99741e76c 100644
--- a/packages/create-llama/templates/types/simple/fastapi/app/api/routers/chat.py
+++ b/packages/create-llama/templates/types/simple/fastapi/app/api/routers/chat.py
@@ -1,10 +1,11 @@
 from typing import List
 
-from app.utils.index import get_index
 from fastapi import APIRouter, Depends, HTTPException, status
-from llama_index import VectorStoreIndex
-from llama_index.llms.base import MessageRole, ChatMessage
+from llama_index.chat_engine.types import BaseChatEngine
+from llama_index.llms.base import ChatMessage
+from llama_index.llms.types import MessageRole
 from pydantic import BaseModel
+from app.engine.index import get_chat_engine
 
 chat_router = r = APIRouter()
 
@@ -25,7 +26,7 @@ class _Result(BaseModel):
 @r.post("")
 async def chat(
     data: _ChatData,
-    index: VectorStoreIndex = Depends(get_index),
+    chat_engine: BaseChatEngine = Depends(get_chat_engine),
 ) -> _Result:
     # check preconditions and get last message
     if len(data.messages) == 0:
@@ -49,7 +50,6 @@ async def chat(
     ]
 
     # query chat engine
-    chat_engine = index.as_chat_engine()
     response = chat_engine.chat(lastMessage.content, messages)
     return _Result(
         result=_Message(role=MessageRole.ASSISTANT, content=response.response)
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/context.py b/packages/create-llama/templates/types/simple/fastapi/app/context.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae00de217c8741e080c981cc3fed21f24fe19961
--- /dev/null
+++ b/packages/create-llama/templates/types/simple/fastapi/app/context.py
@@ -0,0 +1,11 @@
+import os
+
+from llama_index import ServiceContext
+from llama_index.llms import OpenAI
+
+
+def create_base_context():
+    model = os.getenv("MODEL", "gpt-3.5-turbo")
+    return ServiceContext.from_defaults(
+        llm=OpenAI(model=model),
+    )
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/utils/__init__.py b/packages/create-llama/templates/types/simple/fastapi/app/engine/__init__.py
similarity index 100%
rename from packages/create-llama/templates/types/streaming/fastapi/app/utils/__init__.py
rename to packages/create-llama/templates/types/simple/fastapi/app/engine/__init__.py
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/engine/index.py b/packages/create-llama/templates/types/simple/fastapi/app/engine/index.py
new file mode 100644
index 0000000000000000000000000000000000000000..663b595a40c17f6090bb933bd73ea08ae71286de
--- /dev/null
+++ b/packages/create-llama/templates/types/simple/fastapi/app/engine/index.py
@@ -0,0 +1,7 @@
+from llama_index.chat_engine import SimpleChatEngine
+
+from app.context import create_base_context
+
+
+def get_chat_engine():
+    return SimpleChatEngine.from_defaults(service_context=create_base_context())
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/utils/index.py b/packages/create-llama/templates/types/simple/fastapi/app/utils/index.py
deleted file mode 100644
index 530935b7ec84c93bafee5e4d37265f4e4411fd65..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/fastapi/app/utils/index.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import logging
-import os
-
-from llama_index import (
-    SimpleDirectoryReader,
-    StorageContext,
-    VectorStoreIndex,
-    load_index_from_storage,
-    ServiceContext,
-)
-from llama_index.llms import OpenAI
-
-
-STORAGE_DIR = "./storage"  # directory to cache the generated index
-DATA_DIR = "./data"  # directory containing the documents to index
-
-service_context = ServiceContext.from_defaults(
-    llm=OpenAI(model="gpt-3.5-turbo")
-)
-
-
-def get_index():
-    logger = logging.getLogger("uvicorn")
-    # check if storage already exists
-    if not os.path.exists(STORAGE_DIR):
-        logger.info("Creating new index")
-        # load the documents and create the index
-        documents = SimpleDirectoryReader(DATA_DIR).load_data()
-        index = VectorStoreIndex.from_documents(documents,service_context=service_context)
-        # store it for later
-        index.storage_context.persist(STORAGE_DIR)
-        logger.info(f"Finished creating new index. Stored in {STORAGE_DIR}")
-    else:
-        # load the existing index
-        logger.info(f"Loading index from {STORAGE_DIR}...")
-        storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
-        index = load_index_from_storage(storage_context,service_context=service_context)
-        logger.info(f"Finished loading index from {STORAGE_DIR}")
-    return index
diff --git a/packages/create-llama/templates/types/simple/fastapi/main.py b/packages/create-llama/templates/types/simple/fastapi/main.py
index 00cb79c44d58f4819243b8ee16c3108bef950382..ba56f0345bacc5ad73e4218a781bee57427e1ec9 100644
--- a/packages/create-llama/templates/types/simple/fastapi/main.py
+++ b/packages/create-llama/templates/types/simple/fastapi/main.py
@@ -1,4 +1,5 @@
 from dotenv import load_dotenv
+
 load_dotenv()
 
 import logging
diff --git a/packages/create-llama/templates/types/simple/fastapi/pyproject.toml b/packages/create-llama/templates/types/simple/fastapi/pyproject.toml
index 59d182bbb47a8d0ee06de3550f2c7fbf954a3901..f9bb9605b78f53417fd9f61e9e03812fdc21047e 100644
--- a/packages/create-llama/templates/types/simple/fastapi/pyproject.toml
+++ b/packages/create-llama/templates/types/simple/fastapi/pyproject.toml
@@ -1,5 +1,5 @@
 [tool.poetry]
-name = "llamaindex-fastapi"
+name = "app"
 version = "0.1.0"
 description = ""
 authors = ["Marcus Schiesser <mail@marcusschiesser.de>"]
@@ -9,7 +9,7 @@ readme = "README.md"
 python = "^3.11,<3.12"
 fastapi = "^0.104.1"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
-llama-index = "^0.8.56"
+llama-index = "^0.9.19"
 pypdf = "^3.17.0"
 python-dotenv = "^1.0.0"
 
diff --git a/packages/create-llama/templates/types/streaming/express/constants.ts b/packages/create-llama/templates/types/streaming/express/constants.ts
deleted file mode 100644
index 908949251c708651be6d0ae6b09c2982e65b0a1a..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/express/constants.ts
+++ /dev/null
@@ -1 +0,0 @@
-export const MODEL = "gpt-3.5-turbo";
diff --git a/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts b/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
index 1dbd85d45f157bf5e98741e512826f27e420a15a..4bd1c8da6f2e502462a7bf7221ed2874d656adc1 100644
--- a/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
+++ b/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
@@ -1,7 +1,6 @@
 import { streamToResponse } from "ai";
 import { Request, Response } from "express";
 import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
-import { MODEL } from "../../constants";
 import { createChatEngine } from "./engine";
 import { LlamaIndexStream } from "./llamaindex-stream";
 
@@ -36,7 +35,7 @@ export const chat = async (req: Request, res: Response) => {
     }
 
     const llm = new OpenAI({
-      model: MODEL,
+      model: process.env.MODEL || "gpt-3.5-turbo",
     });
 
     const chatEngine = await createChatEngine(llm);
diff --git a/packages/create-llama/templates/types/streaming/fastapi/README-template.md b/packages/create-llama/templates/types/streaming/fastapi/README-template.md
index f0bfa5e089a960e2327b6ccdd4548948952b36f3..7f659b6893582a697ec6c70ad19b14f1009969c4 100644
--- a/packages/create-llama/templates/types/streaming/fastapi/README-template.md
+++ b/packages/create-llama/templates/types/streaming/fastapi/README-template.md
@@ -9,15 +9,21 @@ poetry install
 poetry shell
 ```
 
-By default, we use the OpenAI LLM (though you can customize, see app/api/routers/chat.py). As a result you need to specify an `OPENAI_API_KEY` in an .env file in this directory.
+By default, we use the OpenAI LLM (though you can customize it, see `app/context.py`). As a result, you need to specify an `OPENAI_API_KEY` in a `.env` file in this directory.
 
-Example `backend/.env` file:
+Example `.env` file:
 
 ```
 OPENAI_API_KEY=<openai_api_key>
 ```
 
-Second, run the development server:
+Second, generate the embeddings of the documents in the `./data` directory (if this folder exists; otherwise, skip this step):
+
+```
+python app/engine/generate.py
+```
+
+Third, run the development server:
 
 ```
 python main.py
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/api/routers/chat.py b/packages/create-llama/templates/types/streaming/fastapi/app/api/routers/chat.py
index 36b618e232a33fc882b3d4b42b885c29c64edd4e..9dd9eb6ed30544899a02c7eb3678239a5b94ea6d 100644
--- a/packages/create-llama/templates/types/streaming/fastapi/app/api/routers/chat.py
+++ b/packages/create-llama/templates/types/streaming/fastapi/app/api/routers/chat.py
@@ -1,12 +1,12 @@
 from typing import List
 
 from fastapi.responses import StreamingResponse
+from llama_index.chat_engine.types import BaseChatEngine
 
-from app.utils.json import json_to_model
-from app.utils.index import get_index
+from app.engine.index import get_chat_engine
 from fastapi import APIRouter, Depends, HTTPException, Request, status
-from llama_index import VectorStoreIndex
-from llama_index.llms.base import MessageRole, ChatMessage
+from llama_index.llms.base import ChatMessage
+from llama_index.llms.types import MessageRole
 from pydantic import BaseModel
 
 chat_router = r = APIRouter()
@@ -24,10 +24,8 @@ class _ChatData(BaseModel):
 @r.post("")
 async def chat(
     request: Request,
-    # Note: To support clients sending a JSON object using content-type "text/plain",
-    # we need to use Depends(json_to_model(_ChatData)) here
-    data: _ChatData = Depends(json_to_model(_ChatData)),
-    index: VectorStoreIndex = Depends(get_index),
+    data: _ChatData,
+    chat_engine: BaseChatEngine = Depends(get_chat_engine),
 ):
     # check preconditions and get last message
     if len(data.messages) == 0:
@@ -51,7 +49,6 @@ async def chat(
     ]
 
     # query chat engine
-    chat_engine = index.as_chat_engine()
     response = chat_engine.stream_chat(lastMessage.content, messages)
 
     # stream response
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/context.py b/packages/create-llama/templates/types/streaming/fastapi/app/context.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae00de217c8741e080c981cc3fed21f24fe19961
--- /dev/null
+++ b/packages/create-llama/templates/types/streaming/fastapi/app/context.py
@@ -0,0 +1,11 @@
+import os
+
+from llama_index import ServiceContext
+from llama_index.llms import OpenAI
+
+
+def create_base_context():
+    model = os.getenv("MODEL", "gpt-3.5-turbo")
+    return ServiceContext.from_defaults(
+        llm=OpenAI(model=model),
+    )
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/engine/__init__.py b/packages/create-llama/templates/types/streaming/fastapi/app/engine/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/engine/index.py b/packages/create-llama/templates/types/streaming/fastapi/app/engine/index.py
new file mode 100644
index 0000000000000000000000000000000000000000..663b595a40c17f6090bb933bd73ea08ae71286de
--- /dev/null
+++ b/packages/create-llama/templates/types/streaming/fastapi/app/engine/index.py
@@ -0,0 +1,7 @@
+from llama_index.chat_engine import SimpleChatEngine
+
+from app.context import create_base_context
+
+
+def get_chat_engine():
+    return SimpleChatEngine.from_defaults(service_context=create_base_context())
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/utils/index.py b/packages/create-llama/templates/types/streaming/fastapi/app/utils/index.py
deleted file mode 100644
index cb16cdba37897fc203bdd0358c5f088628375a3f..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/fastapi/app/utils/index.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import logging
-import os
-
-from llama_index import (
-    SimpleDirectoryReader,
-    StorageContext,
-    VectorStoreIndex,
-    load_index_from_storage,
-    ServiceContext,
-)
-from llama_index.llms import OpenAI
-
-
-STORAGE_DIR = "./storage"  # directory to cache the generated index
-DATA_DIR = "./data"  # directory containing the documents to index
-
-service_context = ServiceContext.from_defaults(
-    llm=OpenAI(model="gpt-3.5-turbo")
-)
-
-def get_index():
-    logger = logging.getLogger("uvicorn")
-    # check if storage already exists
-    if not os.path.exists(STORAGE_DIR):
-        logger.info("Creating new index")
-        # load the documents and create the index
-        documents = SimpleDirectoryReader(DATA_DIR).load_data()
-        index = VectorStoreIndex.from_documents(documents,service_context=service_context)
-        # store it for later
-        index.storage_context.persist(STORAGE_DIR)
-        logger.info(f"Finished creating new index. Stored in {STORAGE_DIR}")
-    else:
-        # load the existing index
-        logger.info(f"Loading index from {STORAGE_DIR}...")
-        storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
-        index = load_index_from_storage(storage_context,service_context=service_context)
-        logger.info(f"Finished loading index from {STORAGE_DIR}")
-    return index
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/utils/json.py b/packages/create-llama/templates/types/streaming/fastapi/app/utils/json.py
deleted file mode 100644
index d9a847f53e107f665389f11ec005795e0fb8c5b3..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/fastapi/app/utils/json.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import json
-from typing import TypeVar
-from fastapi import HTTPException, Request
-
-from pydantic import BaseModel, ValidationError
-
-
-T = TypeVar("T", bound=BaseModel)
-
-
-def json_to_model(cls: T):
-    async def get_json(request: Request) -> T:
-        body = await request.body()
-        try:
-            data_dict = json.loads(body.decode("utf-8"))
-            return cls(**data_dict)
-        except (json.JSONDecodeError, ValidationError) as e:
-            raise HTTPException(
-                status_code=400, detail=f"Could not decode JSON: {str(e)}"
-            )
-
-    return get_json
diff --git a/packages/create-llama/templates/types/streaming/fastapi/main.py b/packages/create-llama/templates/types/streaming/fastapi/main.py
index 00cb79c44d58f4819243b8ee16c3108bef950382..ba56f0345bacc5ad73e4218a781bee57427e1ec9 100644
--- a/packages/create-llama/templates/types/streaming/fastapi/main.py
+++ b/packages/create-llama/templates/types/streaming/fastapi/main.py
@@ -1,4 +1,5 @@
 from dotenv import load_dotenv
+
 load_dotenv()
 
 import logging
diff --git a/packages/create-llama/templates/types/streaming/fastapi/pyproject.toml b/packages/create-llama/templates/types/streaming/fastapi/pyproject.toml
index f5b75b3cfdbe88001d4588faf27e07cec36212a9..f9bb9605b78f53417fd9f61e9e03812fdc21047e 100644
--- a/packages/create-llama/templates/types/streaming/fastapi/pyproject.toml
+++ b/packages/create-llama/templates/types/streaming/fastapi/pyproject.toml
@@ -1,5 +1,5 @@
 [tool.poetry]
-name = "llamaindex-fastapi-streaming"
+name = "app"
 version = "0.1.0"
 description = ""
 authors = ["Marcus Schiesser <mail@marcusschiesser.de>"]
@@ -9,7 +9,7 @@ readme = "README.md"
 python = "^3.11,<3.12"
 fastapi = "^0.104.1"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
-llama-index = "^0.8.56"
+llama-index = "^0.9.19"
 pypdf = "^3.17.0"
 python-dotenv = "^1.0.0"
 
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/route.ts b/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/route.ts
index 99fea0f618eb9b007ba0b7f8349522957e15ed92..ff00a3894e02727f3e2e6036127e29843bf0e122 100644
--- a/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/route.ts
+++ b/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/route.ts
@@ -1,4 +1,3 @@
-import { MODEL } from "@/constants";
 import { Message, StreamingTextResponse } from "ai";
 import { MessageContent, OpenAI } from "llamaindex";
 import { NextRequest, NextResponse } from "next/server";
@@ -43,7 +42,7 @@ export async function POST(request: NextRequest) {
     }
 
     const llm = new OpenAI({
-      model: MODEL,
+      model: process.env.MODEL || "gpt-3.5-turbo",
       maxTokens: 512,
     });
 
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/chat-section.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/chat-section.tsx
index b42edb2731f33a4394027569416b26f214c90c13..e51eeef329741f7833b3119f9f44c6378921bb1d 100644
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/chat-section.tsx
+++ b/packages/create-llama/templates/types/streaming/nextjs/app/components/chat-section.tsx
@@ -1,6 +1,5 @@
 "use client";
 
-import { MODEL } from "@/constants";
 import { useChat } from "ai/react";
 import { ChatInput, ChatMessages } from "./ui/chat";
 
@@ -33,7 +32,7 @@ export default function ChatSection() {
         handleSubmit={handleSubmit}
         handleInputChange={handleInputChange}
         isLoading={isLoading}
-        multiModal={MODEL === "gpt-4-vision-preview"}
+        multiModal={process.env.NEXT_PUBLIC_MODEL === "gpt-4-vision-preview"}
       />
     </div>
   );
diff --git a/packages/create-llama/templates/types/streaming/nextjs/constants.ts b/packages/create-llama/templates/types/streaming/nextjs/constants.ts
deleted file mode 100644
index 0959a5f6f4f9301f2559cf5b04c36de0ef3afe4a..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/constants.ts
+++ /dev/null
@@ -1 +0,0 @@
-export const MODEL = "gpt-4-vision-preview";
diff --git a/packages/eslint-config-custom/index.js b/packages/eslint-config-custom/index.js
index dd76bc2bba54425133bec8fd031ce50ac01530fc..d46a122e9c09d53301912adb4e6872cf6ac40025 100644
--- a/packages/eslint-config-custom/index.js
+++ b/packages/eslint-config-custom/index.js
@@ -45,6 +45,8 @@ module.exports = {
           "https_proxy",
           "npm_config_user_agent",
           "NEXT_PUBLIC_CHAT_API",
+          "MODEL",
+          "NEXT_PUBLIC_MODEL",
         ],
       },
     ],
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index c68f43736929dfc09837a5ffaa3da695e24daf5f..e6b62e77cf94cb735e98b4ec437cf007f8248ac8 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -306,6 +306,9 @@ importers:
       prompts:
         specifier: 2.1.0
         version: 2.1.0
+      rimraf:
+        specifier: ^5.0.5
+        version: 5.0.5
       tar:
         specifier: 6.1.15
         version: 6.1.15
@@ -3461,6 +3464,18 @@ packages:
   /@humanwhocodes/object-schema@2.0.1:
     resolution: {integrity: sha512-dvuCeX5fC9dXgJn9t+X5atfmgQAzUOWqS1254Gh0m6i8wKd10ebXkfNKiRK+1GWi/yTvvLDHpoxLr0xxxeslWw==}
 
+  /@isaacs/cliui@8.0.2:
+    resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==}
+    engines: {node: '>=12'}
+    dependencies:
+      string-width: 5.1.2
+      string-width-cjs: /string-width@4.2.3
+      strip-ansi: 7.1.0
+      strip-ansi-cjs: /strip-ansi@6.0.1
+      wrap-ansi: 8.1.0
+      wrap-ansi-cjs: /wrap-ansi@7.0.0
+    dev: true
+
   /@istanbuljs/load-nyc-config@1.1.0:
     resolution: {integrity: sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==}
     engines: {node: '>=8'}
@@ -3966,6 +3981,13 @@ packages:
       typescript: 4.9.5
     dev: false
 
+  /@pkgjs/parseargs@0.11.0:
+    resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==}
+    engines: {node: '>=14'}
+    requiresBuild: true
+    dev: true
+    optional: true
+
   /@pkgr/utils@2.4.0:
     resolution: {integrity: sha512-2OCURAmRtdlL8iUDTypMrrxfwe8frXTeXaxGsVOaYtc/wrUyk8Z/0OBetM7cdlsy7ZFWlMX72VogKeh+A4Xcjw==}
     engines: {node: ^12.20.0 || ^14.18.0 || >=16.0.0}
@@ -5877,8 +5899,8 @@ packages:
     engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
     hasBin: true
     dependencies:
-      caniuse-lite: 1.0.30001570
-      electron-to-chromium: 1.4.615
+      caniuse-lite: 1.0.30001571
+      electron-to-chromium: 1.4.616
       node-releases: 2.0.14
       update-browserslist-db: 1.0.13(browserslist@4.22.2)
     dev: false
@@ -6079,8 +6101,8 @@ packages:
   /caniuse-lite@1.0.30001546:
     resolution: {integrity: sha512-zvtSJwuQFpewSyRrI3AsftF6rM0X80mZkChIt1spBGEvRglCrjTniXvinc8JKRoqTwXAgvqTImaN9igfSMtUBw==}
 
-  /caniuse-lite@1.0.30001570:
-    resolution: {integrity: sha512-+3e0ASu4sw1SWaoCtvPeyXp+5PsjigkSt8OXZbF9StH5pQWbxEjLAZE3n8Aup5udop1uRiKA7a4utUk/uoSpUw==}
+  /caniuse-lite@1.0.30001571:
+    resolution: {integrity: sha512-tYq/6MoXhdezDLFZuCO/TKboTzuQ/xR5cFdgXPfDtM7/kchBO3b4VWghE/OAi/DV7tTdhmLjZiZBZi1fA/GheQ==}
     dev: false
 
   /canvas@2.11.2:
@@ -7531,8 +7553,8 @@ packages:
   /electron-to-chromium@1.4.543:
     resolution: {integrity: sha512-t2ZP4AcGE0iKCCQCBx/K2426crYdxD3YU6l0uK2EO3FZH0pbC4pFz/sZm2ruZsND6hQBTcDWWlo/MLpiOdif5g==}
 
-  /electron-to-chromium@1.4.615:
-    resolution: {integrity: sha512-/bKPPcgZVUziECqDc+0HkT87+0zhaWSZHNXqF8FLd2lQcptpmUFwoCSWjCdOng9Gdq+afKArPdEg/0ZW461Eng==}
+  /electron-to-chromium@1.4.616:
+    resolution: {integrity: sha512-1n7zWYh8eS0L9Uy+GskE0lkBUNK83cXTVJI0pU3mGprFsbfSdAc15VTFbo+A+Bq4pwstmL30AVcEU3Fo463lNg==}
     dev: false
 
   /elliptic@6.5.4:
@@ -8623,6 +8645,14 @@ packages:
     dependencies:
       is-callable: 1.2.7
 
+  /foreground-child@3.1.1:
+    resolution: {integrity: sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==}
+    engines: {node: '>=14'}
+    dependencies:
+      cross-spawn: 7.0.3
+      signal-exit: 4.1.0
+    dev: true
+
   /fork-ts-checker-webpack-plugin@6.5.3(eslint@8.54.0)(typescript@4.9.5)(webpack@5.88.2):
     resolution: {integrity: sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==}
     engines: {node: '>=10', yarn: '>=1.0.0'}
@@ -8930,6 +8960,18 @@ packages:
   /glob-to-regexp@0.4.1:
     resolution: {integrity: sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==}
 
+  /glob@10.3.10:
+    resolution: {integrity: sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==}
+    engines: {node: '>=16 || 14 >=14.17'}
+    hasBin: true
+    dependencies:
+      foreground-child: 3.1.1
+      jackspeak: 2.3.6
+      minimatch: 9.0.3
+      minipass: 5.0.0
+      path-scurry: 1.10.1
+    dev: true
+
   /glob@7.1.6:
     resolution: {integrity: sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==}
     dependencies:
@@ -10200,6 +10242,15 @@ packages:
       istanbul-lib-report: 3.0.1
     dev: true
 
+  /jackspeak@2.3.6:
+    resolution: {integrity: sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==}
+    engines: {node: '>=14'}
+    dependencies:
+      '@isaacs/cliui': 8.0.2
+    optionalDependencies:
+      '@pkgjs/parseargs': 0.11.0
+    dev: true
+
   /jest-changed-files@29.7.0:
     resolution: {integrity: sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==}
     engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0}
@@ -11088,6 +11139,11 @@ packages:
     resolution: {integrity: sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==}
     engines: {node: '>=8'}
 
+  /lru-cache@10.1.0:
+    resolution: {integrity: sha512-/1clY/ui8CzjKFyjdvwPWJUYKiFVXG2I2cY0ssG7h4+hwk+XOIX7ZSG9Q7TW8TW3Kp3BUSqgFWBLgL4PJ+Blag==}
+    engines: {node: 14 || >=16.14}
+    dev: true
+
   /lru-cache@4.1.5:
     resolution: {integrity: sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==}
     dependencies:
@@ -12296,6 +12352,14 @@ packages:
   /path-parse@1.0.7:
     resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==}
 
+  /path-scurry@1.10.1:
+    resolution: {integrity: sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ==}
+    engines: {node: '>=16 || 14 >=14.17'}
+    dependencies:
+      lru-cache: 10.1.0
+      minipass: 5.0.0
+    dev: true
+
   /path-to-regexp@0.1.7:
     resolution: {integrity: sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==}
     dev: false
@@ -13927,6 +13991,14 @@ packages:
     dependencies:
       glob: 7.2.3
 
+  /rimraf@5.0.5:
+    resolution: {integrity: sha512-CqDakW+hMe/Bz202FPEymy68P+G50RfMQK+Qo5YUqc9SPipvbGjCGKd0RSKEelbsfQuw3g5NZDSrlZZAJurH1A==}
+    engines: {node: '>=14'}
+    hasBin: true
+    dependencies:
+      glob: 10.3.10
+    dev: true
+
   /ripemd160@2.0.2:
     resolution: {integrity: sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==}
     dependencies: