diff --git a/.changeset/hot-windows-watch.md b/.changeset/hot-windows-watch.md
deleted file mode 100644
index 0020daf70a1e9b1718faa971c97e9fa4023e5bd9..0000000000000000000000000000000000000000
--- a/.changeset/hot-windows-watch.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-"create-llama": patch
----
-
-fix missing .env value, improve docs and error message
diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
deleted file mode 100644
index 70cda2ca69341075ec97d28f5614ed3992e804e1..0000000000000000000000000000000000000000
--- a/.github/workflows/e2e.yml
+++ /dev/null
@@ -1,68 +0,0 @@
-name: E2E Tests
-on:
-  push:
-    branches: [main]
-  pull_request:
-    paths:
-      - "packages/create-llama/**"
-      - ".github/workflows/e2e.yml"
-    branches: [main]
-
-env:
-  POETRY_VERSION: "1.6.1"
-
-jobs:
-  e2e:
-    name: create-llama
-    timeout-minutes: 60
-    strategy:
-      fail-fast: true
-      matrix:
-        node-version: [18, 20]
-        python-version: ["3.11"]
-        os: [macos-latest, windows-latest]
-    defaults:
-      run:
-        shell: bash
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v4
-      - name: Set up python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
-        with:
-          python-version: ${{ matrix.python-version }}
-      - name: Install Poetry
-        uses: snok/install-poetry@v1
-        with:
-          version: ${{ env.POETRY_VERSION }}
-      - uses: pnpm/action-setup@v2
-      - name: Setup Node.js ${{ matrix.node-version }}
-        uses: actions/setup-node@v4
-        with:
-          node-version: ${{ matrix.node-version }}
-          cache: "pnpm"
-      - name: Install dependencies
-        run: pnpm install
-      - name: Install Playwright Browsers
-        run: pnpm exec playwright install --with-deps
-        working-directory: ./packages/create-llama
-      - name: Build create-llama
-        run: pnpm run build
-        working-directory: ./packages/create-llama
-      - name: Pack
-        run: pnpm pack --pack-destination ./output
-        working-directory: ./packages/create-llama
-      - name: Extract Pack
-        run: tar -xvzf ./output/*.tgz -C ./output
-        working-directory: ./packages/create-llama
-      - name: Run Playwright tests
-        run: pnpm exec playwright test
-        env:
-          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-        working-directory: ./packages/create-llama
-      - uses: actions/upload-artifact@v3
-        if: always()
-        with:
-          name: playwright-report
-          path: ./packages/create-llama/playwright-report/
-          retention-days: 30
diff --git a/.gitignore b/.gitignore
index 8d11dc46809a859859e1772fb96848460a2596a5..8f37f7c04cd5f7a55a0934350827511c5ab03208 100644
--- a/.gitignore
+++ b/.gitignore
@@ -45,7 +45,6 @@ playwright-report/
 blob-report/
 playwright/.cache/
 .tsbuildinfo
-packages/create-llama/e2e/cache
 
 # intellij
 **/.idea
diff --git a/.prettierignore b/.prettierignore
index d19028fa85d6bce0087d5524b9158a03e8f73ddd..5cbece915d98a3d2b6b299148214dd2976f574b0 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -4,4 +4,3 @@ pnpm-lock.yaml
 lib/
 dist/
 .docusaurus/
-packages/create-llama/e2e/cache/
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2b4797aec22da8d3552a1c8708b15ac284ec76a1..8632eb698b25d2708a58b9c7d722b96e5f2e04a2 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -84,8 +84,7 @@ Any changes you make should be reflected in the browser. If you need to regenera
 To publish a new version of the library, run
 
 ```shell
-pnpm new-llamaindex
-pnpm new-create-llama
+pnpm new-version
 pnpm release
 git push # push to the main branch
 git push --tags
diff --git a/package.json b/package.json
index bc5660b56b9c05f7aa0a72b3ad8a300dbe74f922..dc6ca9cb0e58ef40a8a50b28a5d261b480ebcfe2 100644
--- a/package.json
+++ b/package.json
@@ -12,9 +12,7 @@
     "test": "turbo run test",
     "type-check": "tsc -b --diagnostics",
     "release": "pnpm run build:release && changeset publish",
-    "new-llamaindex": "pnpm run build:release && changeset version --ignore create-llama",
-    "new-create-llama": "pnpm run build:release && changeset version --ignore llamaindex --ignore @llamaindex/core-test --ignore @llamaindex/experimental",
-    "new-experimental": "pnpm run build:release && changeset version --ignore create-llama"
+    "new-version": "pnpm run build:release && changeset version"
   },
   "devDependencies": {
     "@changesets/cli": "^2.27.1",
diff --git a/packages/create-llama/.eslintrc b/packages/create-llama/.eslintrc
deleted file mode 100644
index fc6949f087a7257ff69649dcc594a22a49b9dd8c..0000000000000000000000000000000000000000
--- a/packages/create-llama/.eslintrc
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "root": false,
-  "rules": {
-    "turbo/no-undeclared-env-vars": [
-      "error",
-      {
-        "allowList": [
-          "OPENAI_API_KEY",
-          "LLAMA_CLOUD_API_KEY",
-          "npm_config_user_agent",
-          "http_proxy",
-          "https_proxy",
-          "MODEL",
-          "NEXT_PUBLIC_CHAT_API",
-          "NEXT_PUBLIC_MODEL"
-        ]
-      }
-    ]
-  }
-}
diff --git a/packages/create-llama/CHANGELOG.md b/packages/create-llama/CHANGELOG.md
deleted file mode 100644
index f2b4f858f9bd00d9933faecdf21a5f808dd4fa9e..0000000000000000000000000000000000000000
--- a/packages/create-llama/CHANGELOG.md
+++ /dev/null
@@ -1,175 +0,0 @@
-# create-llama
-
-## 0.0.28
-
-### Patch Changes
-
-- 89a49f4: Add more config variables to .env file
-- fdf48dd: Add "Start in VSCode" option to postInstallAction
-- fdf48dd: Add devcontainers to generated code
-
-## 0.0.27
-
-### Patch Changes
-
-- 2d29350: Add LlamaParse option when selecting a pdf file or a folder (FastAPI only)
-- b354f23: Add embedding model option to create-llama (FastAPI only)
-
-## 0.0.26
-
-### Patch Changes
-
-- 09d532e: feat: generate llama pack project from llama index
-- cfdd6db: feat: add pinecone support to create llama
-- ef25d69: upgrade llama-index package to version v0.10.7 for create-llama app
-- 50dfd7b: update fastapi for CVE-2024-24762
-
-## 0.0.25
-
-### Patch Changes
-
-- d06a85b: Add option to create an agent by selecting tools (Google, Wikipedia)
-- 7b7329b: Added latest turbo models for GPT-3.5 and GPT-4
-
-## 0.0.24
-
-### Patch Changes
-
-- ba95ca3: Use condense plus context chat engine for FastAPI as default
-
-## 0.0.23
-
-### Patch Changes
-
-- c680af6: Fixed issues with locating templates path
-
-## 0.0.22
-
-### Patch Changes
-
-- 6dd401e: Add an option to provide a URL and chat with the website data (FastAPI only)
-- e9b87ef: Select a folder as data source and support more file types (.pdf, .doc, .docx, .xls, .xlsx, .csv)
-
-## 0.0.20
-
-### Patch Changes
-
-- 27d55fd: Add an option to provide a URL and chat with the website data
-
-## 0.0.19
-
-### Patch Changes
-
-- 3a29a80: Add node_modules to gitignore in Express backends
-- fe03aaa: feat: generate llama pack example
-
-## 0.0.18
-
-### Patch Changes
-
-- 88d3b41: fix packaging
-
-## 0.0.17
-
-### Patch Changes
-
-- fa17f7e: Add an option that allows the user to run the generated app
-- 9e5d8e1: Add an option to select a local PDF file as data source
-
-## 0.0.16
-
-### Patch Changes
-
-- a73942d: Fix: Bundle mongo dependency with NextJS
-- 9492cc6: Feat: Added option to automatically install dependencies (for Python and TS)
-- f74dea5: Feat: Show images in chat messages using GPT4 Vision (Express and NextJS only)
-
-## 0.0.15
-
-### Patch Changes
-
-- 8e124e5: feat: support showing image on chat message
-
-## 0.0.14
-
-### Patch Changes
-
-- 2e6b36e: fix: re-organize file structure
-- 2b356c8: fix: relative path incorrect
-
-## 0.0.13
-
-### Patch Changes
-
-- Added PostgreSQL vector store (for Typescript and Python)
-- Improved async handling in FastAPI
-
-## 0.0.12
-
-### Patch Changes
-
-- 9c5e22a: Added cross-env so frontends with Express/FastAPI backends are working under Windows
-- 5ab65eb: Bring Python templates with TS templates to feature parity
-- 9c5e22a: Added vector DB selector to create-llama (starting with MongoDB support)
-
-## 0.0.11
-
-### Patch Changes
-
-- 2aeb341: - Added option to create a new project based on community templates
-  - Added OpenAI model selector for NextJS projects
-  - Added GPT4 Vision support (and file upload)
-
-## 0.0.10
-
-### Patch Changes
-
-- Bugfixes (thanks @marcusschiesser)
-
-## 0.0.9
-
-### Patch Changes
-
-- acfe232: Deployment fixes (thanks @seldo)
-
-## 0.0.8
-
-### Patch Changes
-
-- 8cdb07f: Fix Next deployment (thanks @seldo and @marcusschiesser)
-
-## 0.0.7
-
-### Patch Changes
-
-- 9f9f293: Added more to README and made it easier to switch models (thanks @seldo)
-
-## 0.0.6
-
-### Patch Changes
-
-- 4431ec7: Label bug fix (thanks @marcusschiesser)
-
-## 0.0.5
-
-### Patch Changes
-
-- 25257f4: Fix issue where it doesn't find OpenAI Key when running npm run generate (#182) (thanks @RayFernando1337)
-
-## 0.0.4
-
-### Patch Changes
-
-- 031e926: Update create-llama readme (thanks @logan-markewich)
-
-## 0.0.3
-
-### Patch Changes
-
-- 91b42a3: change version (thanks @marcusschiesser)
-
-## 0.0.2
-
-### Patch Changes
-
-- e2a6805: Hello Create Llama (thanks @marcusschiesser)
diff --git a/packages/create-llama/LICENSE.md b/packages/create-llama/LICENSE.md
deleted file mode 100644
index c16e650c0ed5197b51ab46b3d4abfdee5069383b..0000000000000000000000000000000000000000
--- a/packages/create-llama/LICENSE.md
+++ /dev/null
@@ -1,9 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2023 LlamaIndex, Vercel, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/packages/create-llama/README.md b/packages/create-llama/README.md
deleted file mode 100644
index 1f6ba4777256c99e0335ac3a101979232c0591cb..0000000000000000000000000000000000000000
--- a/packages/create-llama/README.md
+++ /dev/null
@@ -1,126 +0,0 @@
-# Create LlamaIndex App
-
-The easiest way to get started with [LlamaIndex](https://www.llamaindex.ai/) is by using `create-llama`. This CLI tool enables you to quickly start building a new LlamaIndex application, with everything set up for you.
-
-Just run
-
-```bash
-npx create-llama@latest
-```
-
-to get started, or see below for more options. Once your app is generated, run
-
-```bash
-npm run dev
-```
-
-to start the development server. You can then visit [http://localhost:3000](http://localhost:3000) to see your app.
-
-## What you'll get
-
-- A Next.js-powered front-end. The app is set up as a chat interface that can answer questions about your data (see below)
-  - You can style it with HTML and CSS, or you can optionally use components from [shadcn/ui](https://ui.shadcn.com/)
-- Your choice of 3 back-ends:
-  - **Next.js**: if you select this option, you’ll have a full-stack Next.js application that you can deploy to a host like [Vercel](https://vercel.com/) in just a few clicks. This uses [LlamaIndex.TS](https://www.npmjs.com/package/llamaindex), our TypeScript library.
-  - **Express**: if you want a more traditional Node.js application you can generate an Express backend. This also uses LlamaIndex.TS.
-  - **Python FastAPI**: if you select this option you’ll get a backend powered by the [llama-index python package](https://pypi.org/project/llama-index/), which you can deploy to a service like Render or fly.io.
-- The back-end has a single endpoint that allows you to send the state of your chat and receive additional responses
-- You can choose whether you want a streaming or non-streaming back-end (if you're not sure, we recommend streaming)
-- You can choose whether you want to use `ContextChatEngine` or `SimpleChatEngine`
-  - `SimpleChatEngine` will just talk to the LLM directly without using your data
-  - `ContextChatEngine` will use your data to answer questions (see below).
-- The app uses OpenAI by default, so you'll need an OpenAI API key, or you can customize it to use any of the dozens of LLMs we support.
-
-## Using your data
-
-If you've enabled `ContextChatEngine`, you can supply your own data and the app will index it and answer questions. Your generated app will have a folder called `data`:
-
-- With the Next.js backend this is `./data`
-- With the Express or Python backend this is in `./backend/data`
-
-The app will ingest any supported files you put in this directory. Your Next.js and Express apps use LlamaIndex.TS so they will be able to ingest any PDF, text, CSV, Markdown, Word and HTML files. The Python backend can read even more types, including video and audio files.
-
-Before you can use your data, you need to index it. If you're using the Next.js or Express apps, run:
-
-```bash
-npm run generate
-```
-
-Then re-start your app. Remember you'll need to re-run `generate` if you add new files to your `data` folder. If you're using the Python backend, you can trigger indexing of your data by deleting the `./storage` folder and re-starting the app.
-
-## Don't want a front-end?
-
-It's optional! If you've selected the Python or Express back-ends, just delete the `frontend` folder and you'll get an API without any front-end code.
-
-## Customizing the LLM
-
-By default the app will use OpenAI's gpt-3.5-turbo model. If you want to use GPT-4, you can modify this by editing a file:
-
-- In the Next.js backend, edit `./app/api/chat/route.ts` and replace `gpt-3.5-turbo` with `gpt-4`
-- In the Express backend, edit `./backend/src/controllers/chat.controller.ts` and likewise replace `gpt-3.5-turbo` with `gpt-4`
-- In the Python backend, edit `./backend/app/utils/index.py` and once again replace `gpt-3.5-turbo` with `gpt-4`
-
-You can also replace OpenAI with one of our [dozens of other supported LLMs](https://docs.llamaindex.ai/en/stable/module_guides/models/llms/modules.html).
-
-## Example
-
-The simplest thing to do is run `create-llama` in interactive mode:
-
-```bash
-npx create-llama@latest
-# or
-npm create llama@latest
-# or
-yarn create llama
-# or
-pnpm create llama@latest
-```
-
-You will be asked for the name of your project, along with other configuration options, something like this:
-
-```bash
->> npm create llama@latest
-Need to install the following packages:
-  create-llama@latest
-Ok to proceed? (y) y
-✔ What is your project named? … my-app
-✔ Which template would you like to use? › Chat with streaming
-✔ Which framework would you like to use? › NextJS
-✔ Which UI would you like to use? › Just HTML
-✔ Which chat engine would you like to use? › ContextChatEngine
-✔ Please provide your OpenAI API key (leave blank to skip): …
-✔ Would you like to use ESLint? … No / Yes
-Creating a new LlamaIndex app in /home/my-app.
-```
-
-### Running non-interactively
-
-You can also pass command line arguments to set up a new project
-non-interactively. See `create-llama --help`:
-
-```bash
-create-llama <project-directory> [options]
-
-Options:
-  -V, --version                      output the version number
-
-  --use-npm
-
-    Explicitly tell the CLI to bootstrap the app using npm
-
-  --use-pnpm
-
-    Explicitly tell the CLI to bootstrap the app using pnpm
-
-  --use-yarn
-
-    Explicitly tell the CLI to bootstrap the app using Yarn
-
-```
-
-## LlamaIndex Documentation
-
-- [TS/JS docs](https://ts.llamaindex.ai/)
-- [Python docs](https://docs.llamaindex.ai/en/stable/)
-
-Inspired by and adapted from [create-next-app](https://github.com/vercel/next.js/tree/canary/packages/create-next-app)
diff --git a/packages/create-llama/create-app.ts b/packages/create-llama/create-app.ts
deleted file mode 100644
index 8d6ce9c5089062f2c451927fe283344743a29bc6..0000000000000000000000000000000000000000
--- a/packages/create-llama/create-app.ts
+++ /dev/null
@@ -1,147 +0,0 @@
-/* eslint-disable import/no-extraneous-dependencies */
-import path from "path";
-import { green, yellow } from "picocolors";
-import { tryGitInit } from "./helpers/git";
-import { isFolderEmpty } from "./helpers/is-folder-empty";
-import { getOnline } from "./helpers/is-online";
-import { isWriteable } from "./helpers/is-writeable";
-import { makeDir } from "./helpers/make-dir";
-
-import fs from "fs";
-import terminalLink from "terminal-link";
-import type { InstallTemplateArgs } from "./helpers";
-import { installTemplate } from "./helpers";
-import { writeDevcontainer } from "./helpers/devcontainer";
-import { templatesDir } from "./helpers/dir";
-import { toolsRequireConfig } from "./helpers/tools";
-
-export type InstallAppArgs = Omit<
-  InstallTemplateArgs,
-  "appName" | "root" | "isOnline" | "customApiPath"
-> & {
-  appPath: string;
-  frontend: boolean;
-};
-
-export async function createApp({
-  template,
-  framework,
-  engine,
-  ui,
-  appPath,
-  packageManager,
-  eslint,
-  frontend,
-  openAiKey,
-  llamaCloudKey,
-  model,
-  embeddingModel,
-  communityProjectPath,
-  llamapack,
-  vectorDb,
-  externalPort,
-  postInstallAction,
-  dataSource,
-  tools,
-}: InstallAppArgs): Promise<void> {
-  const root = path.resolve(appPath);
-
-  if (!(await isWriteable(path.dirname(root)))) {
-    console.error(
-      "The application path is not writable, please check folder permissions and try again.",
-    );
-    console.error(
-      "It is likely you do not have write permissions for this folder.",
-    );
-    process.exit(1);
-  }
-
-  const appName = path.basename(root);
-
-  await makeDir(root);
-  if (!isFolderEmpty(root, appName)) {
-    process.exit(1);
-  }
-
-  const useYarn = packageManager === "yarn";
-  const isOnline = !useYarn || (await getOnline());
-
-  console.log(`Creating a new LlamaIndex app in ${green(root)}.`);
-  console.log();
-
-  const args = {
-    appName,
-    root,
-    template,
-    framework,
-    engine,
-    ui,
-    packageManager,
-    isOnline,
-    eslint,
-    openAiKey,
-    llamaCloudKey,
-    model,
-    embeddingModel,
-    communityProjectPath,
-    llamapack,
-    vectorDb,
-    externalPort,
-    postInstallAction,
-    dataSource,
-    tools,
-  };
-
-  if (frontend) {
-    // install backend
-    const backendRoot = path.join(root, "backend");
-    await makeDir(backendRoot);
-    await installTemplate({ ...args, root: backendRoot, backend: true });
-    // install frontend
-    const frontendRoot = path.join(root, "frontend");
-    await makeDir(frontendRoot);
-    await installTemplate({
-      ...args,
-      root: frontendRoot,
-      framework: "nextjs",
-      customApiPath: `http://localhost:${externalPort ?? 8000}/api/chat`,
-      backend: false,
-    });
-    // copy readme for fullstack
-    await fs.promises.copyFile(
-      path.join(templatesDir, "README-fullstack.md"),
-      path.join(root, "README.md"),
-    );
-  } else {
-    await installTemplate({ ...args, backend: true });
-  }
-
-  await writeDevcontainer(root, templatesDir, framework, frontend);
-
-  process.chdir(root);
-  if (tryGitInit(root)) {
-    console.log("Initialized a git repository.");
-    console.log();
-  }
-
-  if (toolsRequireConfig(tools)) {
-    console.log(
-      yellow(
-        `You have selected tools that require configuration. Please configure them in the ${terminalLink(
-          "tools_config.json",
-          `file://${root}/tools_config.json`,
-        )} file.`,
-      ),
-    );
-  }
-  console.log("");
-  console.log(`${green("Success!")} Created ${appName} at ${appPath}`);
-
-  console.log(
-    `Now have a look at the ${terminalLink(
-      "README.md",
-      `file://${root}/README.md`,
-    )} and learn how to get started.`,
-  );
-  console.log();
-}
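
For orientation on the deleted `create-app.ts`, here is a minimal sketch of how `createApp` was called. All option values below are illustrative assumptions; the union types for `template`, `framework`, `engine`, `ui`, etc. come from the deleted `./helpers` module, and field optionality follows those (not reproduced) types.

```ts
// Hypothetical caller of the deleted createApp() — all values are illustrative.
import { createApp } from "./create-app";

await createApp({
  template: "streaming",
  framework: "fastapi",
  engine: "context",
  ui: "html",
  appPath: "./my-app",
  packageManager: "npm",
  eslint: true,
  frontend: true, // scaffold separate backend/ and frontend/ folders as above
  openAiKey: process.env.OPENAI_API_KEY,
  llamaCloudKey: undefined,
  model: "gpt-3.5-turbo",
  embeddingModel: "text-embedding-ada-002",
  communityProjectPath: undefined,
  llamapack: undefined,
  vectorDb: "none",
  externalPort: 8000, // frontend then targets http://localhost:8000/api/chat
  postInstallAction: "none",
  dataSource: undefined,
  tools: undefined,
});
```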
diff --git a/packages/create-llama/e2e/basic.spec.ts b/packages/create-llama/e2e/basic.spec.ts
deleted file mode 100644
index 93e3edfb9caa47b0d21fa3777c392f7253287b10..0000000000000000000000000000000000000000
--- a/packages/create-llama/e2e/basic.spec.ts
+++ /dev/null
@@ -1,145 +0,0 @@
-/* eslint-disable turbo/no-undeclared-env-vars */
-import { expect, test } from "@playwright/test";
-import { ChildProcess } from "child_process";
-import fs from "fs";
-import path from "path";
-import type {
-  TemplateEngine,
-  TemplateFramework,
-  TemplatePostInstallAction,
-  TemplateType,
-  TemplateUI,
-} from "../helpers";
-import { createTestDir, runCreateLlama, type AppType } from "./utils";
-
-const templateTypes: TemplateType[] = ["streaming", "simple"];
-const templateFrameworks: TemplateFramework[] = [
-  "nextjs",
-  "express",
-  "fastapi",
-];
-const templateEngines: TemplateEngine[] = ["simple", "context"];
-const templateUIs: TemplateUI[] = ["shadcn", "html"];
-const templatePostInstallActions: TemplatePostInstallAction[] = [
-  "none",
-  "runApp",
-];
-
-for (const templateType of templateTypes) {
-  for (const templateFramework of templateFrameworks) {
-    for (const templateEngine of templateEngines) {
-      for (const templateUI of templateUIs) {
-        for (const templatePostInstallAction of templatePostInstallActions) {
-          if (templateFramework === "nextjs" && templateType === "simple") {
-            // nextjs doesn't support simple templates - skip tests
-            continue;
-          }
-          const appType: AppType =
-            templateFramework === "express" || templateFramework === "fastapi"
-              ? templateType === "simple"
-                ? "--no-frontend" // simple templates don't have frontends
-                : "--frontend"
-              : "";
-          if (appType === "--no-frontend" && templateUI !== "html") {
-            // if there's no frontend, don't iterate over UIs
-            continue;
-          }
-          test.describe(`try create-llama ${templateType} ${templateFramework} ${templateEngine} ${templateUI} ${appType} ${templatePostInstallAction}`, async () => {
-            let port: number;
-            let externalPort: number;
-            let cwd: string;
-            let name: string;
-            let appProcess: ChildProcess;
-            // Only test without using vector db for now
-            const vectorDb = "none";
-
-            test.beforeAll(async () => {
-              port = Math.floor(Math.random() * 10000) + 10000;
-              externalPort = port + 1;
-              cwd = await createTestDir();
-              const result = await runCreateLlama(
-                cwd,
-                templateType,
-                templateFramework,
-                templateEngine,
-                templateUI,
-                vectorDb,
-                appType,
-                port,
-                externalPort,
-                templatePostInstallAction,
-              );
-              name = result.projectName;
-              appProcess = result.appProcess;
-            });
-
-            test("App folder should exist", async () => {
-              const dirExists = fs.existsSync(path.join(cwd, name));
-              expect(dirExists).toBeTruthy();
-            });
-            test("Frontend should have a title", async ({ page }) => {
-              test.skip(templatePostInstallAction !== "runApp");
-              test.skip(appType === "--no-frontend");
-              await page.goto(`http://localhost:${port}`);
-              await expect(page.getByText("Built by LlamaIndex")).toBeVisible();
-            });
-
-            test("Frontend should be able to submit a message and receive a response", async ({
-              page,
-            }) => {
-              test.skip(templatePostInstallAction !== "runApp");
-              test.skip(appType === "--no-frontend");
-              await page.goto(`http://localhost:${port}`);
-              await page.fill("form input", "hello");
-              const [response] = await Promise.all([
-                page.waitForResponse(
-                  (res) => {
-                    return (
-                      res.url().includes("/api/chat") && res.status() === 200
-                    );
-                  },
-                  {
-                    timeout: 1000 * 60,
-                  },
-                ),
-                page.click("form button[type=submit]"),
-              ]);
-              const text = await response.text();
-              console.log("AI response when submitting message: ", text);
-              expect(response.ok()).toBeTruthy();
-            });
-
-            test("Backend should response when calling API", async ({
-              request,
-            }) => {
-              test.skip(templatePostInstallAction !== "runApp");
-              test.skip(appType !== "--no-frontend");
-              const backendPort = appType === "" ? port : externalPort;
-              const response = await request.post(
-                `http://localhost:${backendPort}/api/chat`,
-                {
-                  data: {
-                    messages: [
-                      {
-                        role: "user",
-                        content: "Hello",
-                      },
-                    ],
-                  },
-                },
-              );
-              const text = await response.text();
-              console.log("AI response when calling API: ", text);
-              expect(response.ok()).toBeTruthy();
-            });
-
-            // clean processes
-            test.afterAll(async () => {
-              appProcess?.kill();
-            });
-          });
-        }
-      }
-    }
-  }
-}
diff --git a/packages/create-llama/e2e/tsconfig.json b/packages/create-llama/e2e/tsconfig.json
deleted file mode 100644
index d2ea7ec52bffa0c242811ee8fac79c9d58e75030..0000000000000000000000000000000000000000
--- a/packages/create-llama/e2e/tsconfig.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "compilerOptions": {
-    "target": "es2019",
-    "module": "esnext",
-    "moduleResolution": "node",
-    "strict": true,
-    "resolveJsonModule": true,
-    "skipLibCheck": true,
-    "declaration": false,
-    "esModuleInterop": true,
-    "forceConsistentCasingInFileNames": true,
-    "incremental": true,
-    "tsBuildInfoFile": "./lib/.tsbuildinfo"
-  },
-  "include": ["./**/*.ts"]
-}
diff --git a/packages/create-llama/e2e/utils.ts b/packages/create-llama/e2e/utils.ts
deleted file mode 100644
index 03b69d07efcfd414725a080488512f8c7b2c9771..0000000000000000000000000000000000000000
--- a/packages/create-llama/e2e/utils.ts
+++ /dev/null
@@ -1,181 +0,0 @@
-import { ChildProcess, exec } from "child_process";
-import crypto from "node:crypto";
-import { mkdir } from "node:fs/promises";
-import * as path from "path";
-import waitPort from "wait-port";
-import {
-  TemplateEngine,
-  TemplateFramework,
-  TemplatePostInstallAction,
-  TemplateType,
-  TemplateUI,
-  TemplateVectorDB,
-} from "../helpers";
-
-export type AppType = "--frontend" | "--no-frontend" | "";
-const MODEL = "gpt-3.5-turbo";
-const EMBEDDING_MODEL = "text-embedding-ada-002";
-export type CreateLlamaResult = {
-  projectName: string;
-  appProcess: ChildProcess;
-};
-
-// eslint-disable-next-line max-params
-export async function checkAppHasStarted(
-  frontend: boolean,
-  framework: TemplateFramework,
-  port: number,
-  externalPort: number,
-  timeout: number,
-) {
-  if (frontend) {
-    await Promise.all([
-      waitPort({
-        host: "localhost",
-        port: port,
-        timeout,
-      }),
-      waitPort({
-        host: "localhost",
-        port: externalPort,
-        timeout,
-      }),
-    ]).catch((err) => {
-      console.error(err);
-      throw err;
-    });
-  } else {
-    let wPort: number;
-    if (framework === "nextjs") {
-      wPort = port;
-    } else {
-      wPort = externalPort;
-    }
-    await waitPort({
-      host: "localhost",
-      port: wPort,
-      timeout,
-    }).catch((err) => {
-      console.error(err);
-      throw err;
-    });
-  }
-}
-
-// eslint-disable-next-line max-params
-export async function runCreateLlama(
-  cwd: string,
-  templateType: TemplateType,
-  templateFramework: TemplateFramework,
-  templateEngine: TemplateEngine,
-  templateUI: TemplateUI,
-  vectorDb: TemplateVectorDB,
-  appType: AppType,
-  port: number,
-  externalPort: number,
-  postInstallAction: TemplatePostInstallAction,
-): Promise<CreateLlamaResult> {
-  const createLlama = path.join(
-    __dirname,
-    "..",
-    "output",
-    "package",
-    "dist",
-    "index.js",
-  );
-
-  const name = [
-    templateType,
-    templateFramework,
-    templateEngine,
-    templateUI,
-    appType,
-  ].join("-");
-  const command = [
-    "node",
-    createLlama,
-    name,
-    "--template",
-    templateType,
-    "--framework",
-    templateFramework,
-    "--engine",
-    templateEngine,
-    "--ui",
-    templateUI,
-    "--vector-db",
-    vectorDb,
-    "--model",
-    MODEL,
-    "--embedding-model",
-    EMBEDDING_MODEL,
-    "--open-ai-key",
-    process.env.OPENAI_API_KEY || "testKey",
-    appType,
-    "--eslint",
-    "--use-npm",
-    "--port",
-    port,
-    "--external-port",
-    externalPort,
-    "--post-install-action",
-    postInstallAction,
-    "--tools",
-    "none",
-    "--no-llama-parse",
-  ].join(" ");
-  console.log(`running command '${command}' in ${cwd}`);
-  const appProcess = exec(command, {
-    cwd,
-    env: {
-      ...process.env,
-    },
-  });
-  appProcess.stderr?.on("data", (data) => {
-    console.log(data.toString());
-  });
-  appProcess.on("exit", (code) => {
-    if (code !== 0 && code !== null) {
-      throw new Error(`create-llama command failed!`);
-    }
-  });
-
-  // Wait for app to start
-  if (postInstallAction === "runApp") {
-    await checkAppHasStarted(
-      appType === "--frontend",
-      templateFramework,
-      port,
-      externalPort,
-      1000 * 60 * 5,
-    );
-  } else {
-    // wait for create-llama to exit
-    // we don't test installing dependencies for now, so just set a 10-second timeout
-    await new Promise((resolve, reject) => {
-      const timeout = setTimeout(() => {
-        reject(new Error("create-llama timeout error"));
-      }, 1000 * 10);
-      appProcess.on("exit", (code) => {
-        if (code !== 0 && code !== null) {
-          clearTimeout(timeout);
-          reject(new Error("create-llama command was failed!"));
-        } else {
-          clearTimeout(timeout);
-          resolve(undefined);
-        }
-      });
-    });
-  }
-
-  return {
-    projectName: name,
-    appProcess,
-  };
-}
-
-export async function createTestDir() {
-  const cwd = path.join(__dirname, "cache", crypto.randomUUID());
-  await mkdir(cwd, { recursive: true });
-  return cwd;
-}
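
For context, the deleted `basic.spec.ts` above drives these helpers roughly as follows — a condensed sketch of its `beforeAll` hook; the port numbers and argument values here are arbitrary examples:

```ts
import { createTestDir, runCreateLlama } from "./utils";

const cwd = await createTestDir(); // e2e/cache/<random-uuid>
const { projectName, appProcess } = await runCreateLlama(
  cwd,
  "streaming", // template type
  "nextjs",    // framework
  "context",   // chat engine
  "html",      // UI
  "none",      // vector DB
  "",          // app type: single fullstack Next.js app
  3100,        // app port
  3101,        // external (backend) port
  "runApp",    // start the generated app once scaffolding finishes
);
console.log(`scaffolded ${projectName} in ${cwd}`);
appProcess.kill(); // mirrors the spec's afterAll cleanup
```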
diff --git a/packages/create-llama/helpers/constant.ts b/packages/create-llama/helpers/constant.ts
deleted file mode 100644
index 64f01be676867ca0b2cf781a50aea162dec8ac4d..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/constant.ts
+++ /dev/null
@@ -1,6 +0,0 @@
-export const COMMUNITY_OWNER = "run-llama";
-export const COMMUNITY_REPO = "create_llama_projects";
-export const LLAMA_PACK_OWNER = "run-llama";
-export const LLAMA_PACK_REPO = "llama_index";
-export const LLAMA_PACK_FOLDER = "llama-index-packs";
-export const LLAMA_PACK_FOLDER_PATH = `${LLAMA_PACK_OWNER}/${LLAMA_PACK_REPO}/main/${LLAMA_PACK_FOLDER}`;
diff --git a/packages/create-llama/helpers/copy.ts b/packages/create-llama/helpers/copy.ts
deleted file mode 100644
index a5b722ba34fe41e867e686f10b1bdf725c58c0a0..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/copy.ts
+++ /dev/null
@@ -1,50 +0,0 @@
-/* eslint-disable import/no-extraneous-dependencies */
-import { async as glob } from "fast-glob";
-import fs from "fs";
-import path from "path";
-
-interface CopyOption {
-  cwd?: string;
-  rename?: (basename: string) => string;
-  parents?: boolean;
-}
-
-const identity = (x: string) => x;
-
-export const copy = async (
-  src: string | string[],
-  dest: string,
-  { cwd, rename = identity, parents = true }: CopyOption = {},
-) => {
-  const source = typeof src === "string" ? [src] : src;
-
-  if (source.length === 0 || !dest) {
-    throw new TypeError("`src` and `dest` are required");
-  }
-
-  const sourceFiles = await glob(source, {
-    cwd,
-    dot: true,
-    absolute: false,
-    stats: false,
-  });
-
-  const destRelativeToCwd = cwd ? path.resolve(cwd, dest) : dest;
-
-  return Promise.all(
-    sourceFiles.map(async (p) => {
-      const dirname = path.dirname(p);
-      const basename = rename(path.basename(p));
-
-      const from = cwd ? path.resolve(cwd, p) : p;
-      const to = parents
-        ? path.join(destRelativeToCwd, dirname, basename)
-        : path.join(destRelativeToCwd, basename);
-
-      // Ensure the destination directory exists
-      await fs.promises.mkdir(path.dirname(to), { recursive: true });
-
-      return fs.promises.copyFile(from, to);
-    }),
-  );
-};
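
A hypothetical call to the deleted `copy()` helper, illustrating what the `CopyOption` fields do — the paths and the rename rule here are invented for the example:

```ts
import { copy } from "./helpers/copy";

// Copy every template file into a target directory, preserving the source
// tree (parents: true) and restoring a dotfile name that npm strips from
// published packages — a typical rename use case (illustrative).
await copy("**", "/tmp/my-app", {
  cwd: "./templates/types/streaming/nextjs",
  parents: true,
  rename: (name) => (name === "gitignore" ? ".gitignore" : name),
});
```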
diff --git a/packages/create-llama/helpers/devcontainer.ts b/packages/create-llama/helpers/devcontainer.ts
deleted file mode 100644
index cb008b97736fa25b55f1ca443438d91017e0aa67..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/devcontainer.ts
+++ /dev/null
@@ -1,61 +0,0 @@
-import fs from "fs";
-import path from "path";
-import { TemplateFramework } from "./types";
-
-function renderDevcontainerContent(
-  templatesDir: string,
-  framework: TemplateFramework,
-  frontend: boolean,
-) {
-  const devcontainerJson: any = JSON.parse(
-    fs.readFileSync(path.join(templatesDir, "devcontainer.json"), "utf8"),
-  );
-
-  // Modify postCreateCommand
-  if (frontend) {
-    devcontainerJson.postCreateCommand =
-      framework === "fastapi"
-        ? "cd backend && poetry install && cd ../frontend && npm install"
-        : "cd backend && npm install && cd ../frontend && npm install";
-  } else {
-    devcontainerJson.postCreateCommand =
-      framework === "fastapi" ? "poetry install" : "npm install";
-  }
-
-  // Modify containerEnv
-  if (framework === "fastapi") {
-    if (frontend) {
-      devcontainerJson.containerEnv = {
-        ...devcontainerJson.containerEnv,
-        PYTHONPATH: "${PYTHONPATH}:${workspaceFolder}/backend",
-      };
-    } else {
-      devcontainerJson.containerEnv = {
-        ...devcontainerJson.containerEnv,
-        PYTHONPATH: "${PYTHONPATH}:${workspaceFolder}",
-      };
-    }
-  }
-
-  return JSON.stringify(devcontainerJson, null, 2);
-}
-
-export const writeDevcontainer = async (
-  root: string,
-  templatesDir: string,
-  framework: TemplateFramework,
-  frontend: boolean,
-) => {
-  console.log("Adding .devcontainer");
-  const devcontainerContent = renderDevcontainerContent(
-    templatesDir,
-    framework,
-    frontend,
-  );
-  const devcontainerDir = path.join(root, ".devcontainer");
-  fs.mkdirSync(devcontainerDir);
-  await fs.promises.writeFile(
-    path.join(devcontainerDir, "devcontainer.json"),
-    devcontainerContent,
-  );
-};
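
For reference, a rough sketch of what `renderDevcontainerContent` yields for a FastAPI backend with a frontend. The base fields come from the templates' `devcontainer.json`, which is not part of this diff, so only the two keys the function modifies are shown:

```ts
// Approximate shape of renderDevcontainerContent(templatesDir, "fastapi", true);
// base template fields omitted (not visible in the diff).
const rendered = {
  postCreateCommand:
    "cd backend && poetry install && cd ../frontend && npm install",
  containerEnv: {
    PYTHONPATH: "${PYTHONPATH}:${workspaceFolder}/backend",
  },
};
```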
diff --git a/packages/create-llama/helpers/dir.ts b/packages/create-llama/helpers/dir.ts
deleted file mode 100644
index 31ded64cf3ac6ec88f89fcb582859a6eaf8422c0..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/dir.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-import path from "path";
-
-export const templatesDir = path.join(__dirname, "..", "templates");
diff --git a/packages/create-llama/helpers/env-variables.ts b/packages/create-llama/helpers/env-variables.ts
deleted file mode 100644
index f9f6d370a44962efec562c65004d17c5f5aae807..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/env-variables.ts
+++ /dev/null
@@ -1,242 +0,0 @@
-import fs from "fs/promises";
-import path from "path";
-import {
-  FileSourceConfig,
-  TemplateDataSource,
-  TemplateFramework,
-  TemplateVectorDB,
-} from "./types";
-
-type EnvVar = {
-  name?: string;
-  description?: string;
-  value?: string;
-};
-
-const renderEnvVar = (envVars: EnvVar[]): string => {
-  return envVars.reduce(
-    (prev, env) =>
-      prev +
-      (env.description
-        ? `# ${env.description.replaceAll("\n", "\n# ")}\n`
-        : "") +
-      (env.name
-        ? env.value
-          ? `${env.name}=${env.value}\n\n`
-          : `# ${env.name}=\n\n`
-        : ""),
-    "",
-  );
-};
-
-const getVectorDBEnvs = (vectorDb: TemplateVectorDB) => {
-  switch (vectorDb) {
-    case "mongo":
-      return [
-        {
-          name: "MONGO_URI",
-          description:
-            "For generating a connection URI, see https://docs.timescale.com/use-timescale/latest/services/create-a-service\nThe MongoDB connection URI.",
-        },
-        {
-          name: "MONGODB_DATABASE",
-        },
-        {
-          name: "MONGODB_VECTORS",
-        },
-        {
-          name: "MONGODB_VECTOR_INDEX",
-        },
-      ];
-    case "pg":
-      return [
-        {
-          name: "PG_CONNECTION_STRING",
-          description:
-            "For generating a connection URI, see https://docs.timescale.com/use-timescale/latest/services/create-a-service\nThe PostgreSQL connection string.",
-        },
-      ];
-
-    case "pinecone":
-      return [
-        {
-          name: "PINECONE_API_KEY",
-          description:
-            "Configuration for Pinecone vector store\nThe Pinecone API key.",
-        },
-        {
-          name: "PINECONE_ENVIRONMENT",
-        },
-        {
-          name: "PINECONE_INDEX_NAME",
-        },
-      ];
-    default:
-      return [];
-  }
-};
-
-const getDataSourceEnvs = (dataSource: TemplateDataSource) => {
-  switch (dataSource.type) {
-    case "web":
-      return [
-        {
-          name: "BASE_URL",
-          description: "The base URL to start web scraping.",
-        },
-        {
-          name: "URL_PREFIX",
-          description: "The prefix of the URL to start web scraping.",
-        },
-        {
-          name: "MAX_DEPTH",
-          description: "The maximum depth to scrape.",
-        },
-      ];
-    default:
-      return [];
-  }
-};
-
-export const createBackendEnvFile = async (
-  root: string,
-  opts: {
-    openAiKey?: string;
-    llamaCloudKey?: string;
-    vectorDb?: TemplateVectorDB;
-    model?: string;
-    embeddingModel?: string;
-    framework?: TemplateFramework;
-    dataSource?: TemplateDataSource;
-    port?: number;
-  },
-) => {
-  // Init env values
-  const envFileName = ".env";
-  const defaultEnvs = [
-    {
-      render: true,
-      name: "MODEL",
-      description: "The name of LLM model to use.",
-      value: opts.model || "gpt-3.5-turbo",
-    },
-    {
-      render: true,
-      name: "OPENAI_API_KEY",
-      description: "The OpenAI API key to use.",
-      value: opts.openAiKey,
-    },
-    // Add vector database environment variables
-    ...(opts.vectorDb ? getVectorDBEnvs(opts.vectorDb) : []),
-    // Add data source environment variables
-    ...(opts.dataSource ? getDataSourceEnvs(opts.dataSource) : []),
-  ];
-  let envVars: EnvVar[] = [];
-  if (opts.framework === "fastapi") {
-    envVars = [
-      ...defaultEnvs,
-      ...[
-        {
-          name: "APP_HOST",
-          description: "The address to start the backend app.",
-          value: "0.0.0.0",
-        },
-        {
-          name: "APP_PORT",
-          description: "The port to start the backend app.",
-          value: opts.port?.toString() || "8000",
-        },
-        {
-          name: "EMBEDDING_MODEL",
-          description: "Name of the embedding model to use.",
-          value: opts.embeddingModel,
-        },
-        {
-          name: "EMBEDDING_DIM",
-          description: "Dimension of the embedding model to use.",
-        },
-        {
-          name: "LLM_TEMPERATURE",
-          description: "Temperature for sampling from the model.",
-        },
-        {
-          name: "LLM_MAX_TOKENS",
-          description: "Maximum number of tokens to generate.",
-        },
-        {
-          name: "TOP_K",
-          description:
-            "The number of similar embeddings to return when retrieving documents.",
-          value: "3",
-        },
-        {
-          name: "SYSTEM_PROMPT",
-          description: `Custom system prompt.
-Example:
-SYSTEM_PROMPT="
-We have provided context information below.
----------------------
-{context_str}
----------------------
-Given this information, please answer the question: {query_str}
-"`,
-        },
-        (opts?.dataSource?.config as FileSourceConfig).useLlamaParse
-          ? {
-              name: "LLAMA_CLOUD_API_KEY",
-              description: `The Llama Cloud API key.`,
-              value: opts.llamaCloudKey,
-            }
-          : {},
-      ],
-    ];
-  } else {
-    envVars = [
-      ...defaultEnvs,
-      ...[
-        opts.framework === "nextjs"
-          ? {
-              name: "NEXT_PUBLIC_MODEL",
-              description:
-                "The LLM model to use (hardcode to front-end artifact).",
-              value: opts.model || "gpt-3.5-turbo",
-            }
-          : {},
-      ],
-    ];
-  }
-  // Render and write env file
-  const content = renderEnvVar(envVars);
-  await fs.writeFile(path.join(root, envFileName), content);
-  console.log(`Created '${envFileName}' file. Please check the settings.`);
-};
-
-export const createFrontendEnvFile = async (
-  root: string,
-  opts: {
-    customApiPath?: string;
-    model?: string;
-  },
-) => {
-  const defaultFrontendEnvs = [
-    {
-      name: "MODEL",
-      description: "The OpenAI model to use.",
-      value: opts.model,
-    },
-    {
-      name: "NEXT_PUBLIC_MODEL",
-      description: "The OpenAI model to use (hardcode to front-end artifact).",
-      value: opts.model,
-    },
-    {
-      name: "NEXT_PUBLIC_CHAT_API",
-      description: "The backend API for chat endpoint.",
-      value: opts.customApiPath
-        ? opts.customApiPath
-        : "http://localhost:8000/api/chat",
-    },
-  ];
-  const content = renderEnvVar(defaultFrontendEnvs);
-  await fs.writeFile(path.join(root, ".env"), content);
-};
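
To make the `.env` rendering concrete: an assumed input to `renderEnvVar` and the text it produces. Variables without a `value` are written commented out, which is how unset settings like `OPENAI_API_KEY` end up as `# OPENAI_API_KEY=` lines:

```ts
// Illustrative input:
const envVars = [
  { name: "MODEL", description: "The name of the LLM model to use.", value: "gpt-3.5-turbo" },
  { name: "OPENAI_API_KEY", description: "The OpenAI API key to use." },
];
// renderEnvVar(envVars) returns:
//   # The name of the LLM model to use.
//   MODEL=gpt-3.5-turbo
//
//   # The OpenAI API key to use.
//   # OPENAI_API_KEY=
```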
diff --git a/packages/create-llama/helpers/get-pkg-manager.ts b/packages/create-llama/helpers/get-pkg-manager.ts
deleted file mode 100644
index 0187c88a49e67bf7aa7fe95520b3af91777987f8..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/get-pkg-manager.ts
+++ /dev/null
@@ -1,15 +0,0 @@
-export type PackageManager = "npm" | "pnpm" | "yarn";
-
-export function getPkgManager(): PackageManager {
-  const userAgent = process.env.npm_config_user_agent || "";
-
-  if (userAgent.startsWith("yarn")) {
-    return "yarn";
-  }
-
-  if (userAgent.startsWith("pnpm")) {
-    return "pnpm";
-  }
-
-  return "npm";
-}
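
`getPkgManager` relies on `npm_config_user_agent`, which each package manager sets when it executes a command. The typical values look roughly like this (version strings are examples, not taken from the diff):

```ts
import { getPkgManager } from "./helpers/get-pkg-manager";

// Typical npm_config_user_agent values (illustrative versions):
//   npm  -> "npm/10.2.4 node/v20.11.0 darwin arm64 workspaces/false"
//   pnpm -> "pnpm/8.15.1 npm/? node/v20.11.0 darwin arm64"
//   yarn -> "yarn/1.22.21 npm/? node/v20.11.0 darwin arm64"
console.log(getPkgManager()); // "pnpm" when run via `pnpm create llama`
```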
diff --git a/packages/create-llama/helpers/git.ts b/packages/create-llama/helpers/git.ts
deleted file mode 100644
index 2cdfe8dc34baefd0c092bbefe6fe071bd4729e0c..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/git.ts
+++ /dev/null
@@ -1,58 +0,0 @@
-/* eslint-disable import/no-extraneous-dependencies */
-import { execSync } from "child_process";
-import fs from "fs";
-import path from "path";
-
-function isInGitRepository(): boolean {
-  try {
-    execSync("git rev-parse --is-inside-work-tree", { stdio: "ignore" });
-    return true;
-  } catch (_) {}
-  return false;
-}
-
-function isInMercurialRepository(): boolean {
-  try {
-    execSync("hg --cwd . root", { stdio: "ignore" });
-    return true;
-  } catch (_) {}
-  return false;
-}
-
-function isDefaultBranchSet(): boolean {
-  try {
-    execSync("git config init.defaultBranch", { stdio: "ignore" });
-    return true;
-  } catch (_) {}
-  return false;
-}
-
-export function tryGitInit(root: string): boolean {
-  let didInit = false;
-  try {
-    execSync("git --version", { stdio: "ignore" });
-    if (isInGitRepository() || isInMercurialRepository()) {
-      return false;
-    }
-
-    execSync("git init", { stdio: "ignore" });
-    didInit = true;
-
-    if (!isDefaultBranchSet()) {
-      execSync("git checkout -b main", { stdio: "ignore" });
-    }
-
-    execSync("git add -A", { stdio: "ignore" });
-    execSync('git commit -m "Initial commit from Create Llama"', {
-      stdio: "ignore",
-    });
-    return true;
-  } catch (e) {
-    if (didInit) {
-      try {
-        fs.rmSync(path.join(root, ".git"), { recursive: true, force: true });
-      } catch (_) {}
-    }
-    return false;
-  }
-}
diff --git a/packages/create-llama/helpers/index.ts b/packages/create-llama/helpers/index.ts
deleted file mode 100644
index 91280af35319e501b99325c9152a849aa2e536af..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/index.ts
+++ /dev/null
@@ -1,192 +0,0 @@
-import { copy } from "./copy";
-import { callPackageManager } from "./install";
-
-import fs from "fs/promises";
-import path from "path";
-import { cyan } from "picocolors";
-
-import { COMMUNITY_OWNER, COMMUNITY_REPO } from "./constant";
-import { templatesDir } from "./dir";
-import { createBackendEnvFile, createFrontendEnvFile } from "./env-variables";
-import { PackageManager } from "./get-pkg-manager";
-import { installLlamapackProject } from "./llama-pack";
-import { isHavingPoetryLockFile, tryPoetryRun } from "./poetry";
-import { installPythonTemplate } from "./python";
-import { downloadAndExtractRepo } from "./repo";
-import {
-  FileSourceConfig,
-  InstallTemplateArgs,
-  TemplateDataSource,
-  TemplateFramework,
-  TemplateVectorDB,
-} from "./types";
-import { installTSTemplate } from "./typescript";
-
-// eslint-disable-next-line max-params
-async function generateContextData(
-  framework: TemplateFramework,
-  packageManager?: PackageManager,
-  openAiKey?: string,
-  vectorDb?: TemplateVectorDB,
-  dataSource?: TemplateDataSource,
-  llamaCloudKey?: string,
-) {
-  if (packageManager) {
-    const runGenerate = `${cyan(
-      framework === "fastapi"
-        ? "poetry run python app/engine/generate.py"
-        : `${packageManager} run generate`,
-    )}`;
-    const openAiKeyConfigured = openAiKey || process.env["OPENAI_API_KEY"];
-    const llamaCloudKeyConfigured = (dataSource?.config as FileSourceConfig)
-      ?.useLlamaParse
-      ? llamaCloudKey || process.env["LLAMA_CLOUD_API_KEY"]
-      : true;
-    const hasVectorDb = vectorDb && vectorDb !== "none";
-    if (framework === "fastapi") {
-      if (
-        openAiKeyConfigured &&
-        llamaCloudKeyConfigured &&
-        !hasVectorDb &&
-        isHavingPoetryLockFile()
-      ) {
-        console.log(`Running ${runGenerate} to generate the context data.`);
-        const result = tryPoetryRun("python app/engine/generate.py");
-        if (!result) {
-          console.log(`Failed to run ${runGenerate}.`);
-          process.exit(1);
-        }
-        console.log(`Generated context data`);
-        return;
-      }
-    } else {
-      if (openAiKeyConfigured && vectorDb === "none") {
-        console.log(`Running ${runGenerate} to generate the context data.`);
-        await callPackageManager(packageManager, true, ["run", "generate"]);
-        return;
-      }
-    }
-
-    const settings = [];
-    if (!openAiKeyConfigured) settings.push("your OpenAI key");
-    if (!llamaCloudKeyConfigured) settings.push("your Llama Cloud key");
-    if (hasVectorDb) settings.push("your Vector DB environment variables");
-    const settingsMessage =
-      settings.length > 0 ? `After setting ${settings.join(" and ")}, ` : "";
-    const generateMessage = `run ${runGenerate} to generate the context data.`;
-    console.log(`\n${settingsMessage}${generateMessage}\n\n`);
-  }
-}
-
-const copyContextData = async (
-  root: string,
-  dataSource?: TemplateDataSource,
-) => {
-  const destPath = path.join(root, "data");
-
-  const dataSourceConfig = dataSource?.config as FileSourceConfig;
-
-  // Copy file
-  if (dataSource?.type === "file") {
-    if (dataSourceConfig.path) {
-      console.log(`\nCopying file to ${cyan(destPath)}\n`);
-      await fs.mkdir(destPath, { recursive: true });
-      await fs.copyFile(
-        dataSourceConfig.path,
-        path.join(destPath, path.basename(dataSourceConfig.path)),
-      );
-    } else {
-      console.log("Missing file path in config");
-      process.exit(1);
-    }
-    return;
-  }
-
-  // Copy folder
-  if (dataSource?.type === "folder") {
-    const srcPath =
-      dataSourceConfig.path ?? path.join(templatesDir, "components", "data");
-    console.log(`\nCopying data to ${cyan(destPath)}\n`);
-    await copy("**", destPath, {
-      parents: true,
-      cwd: srcPath,
-    });
-    return;
-  }
-};
-
-const installCommunityProject = async ({
-  root,
-  communityProjectPath,
-}: Pick<InstallTemplateArgs, "root" | "communityProjectPath">) => {
-  console.log("\nInstalling community project:", communityProjectPath!);
-  await downloadAndExtractRepo(root, {
-    username: COMMUNITY_OWNER,
-    name: COMMUNITY_REPO,
-    branch: "main",
-    filePath: communityProjectPath!,
-  });
-};
-
-export const installTemplate = async (
-  props: InstallTemplateArgs & { backend: boolean },
-) => {
-  process.chdir(props.root);
-
-  if (props.template === "community" && props.communityProjectPath) {
-    await installCommunityProject(props);
-    return;
-  }
-
-  if (props.template === "llamapack" && props.llamapack) {
-    await installLlamapackProject(props);
-    return;
-  }
-
-  if (props.framework === "fastapi") {
-    await installPythonTemplate(props);
-  } else {
-    await installTSTemplate(props);
-  }
-
-  if (props.backend) {
-    // This is a backend, so we need to copy the test data and create the env file.
-
-    // Copy the environment file to the target directory.
-    await createBackendEnvFile(props.root, {
-      openAiKey: props.openAiKey,
-      llamaCloudKey: props.llamaCloudKey,
-      vectorDb: props.vectorDb,
-      model: props.model,
-      embeddingModel: props.embeddingModel,
-      framework: props.framework,
-      dataSource: props.dataSource,
-      port: props.externalPort,
-    });
-
-    if (props.engine === "context") {
-      await copyContextData(props.root, props.dataSource);
-      if (
-        props.postInstallAction === "runApp" ||
-        props.postInstallAction === "dependencies"
-      ) {
-        await generateContextData(
-          props.framework,
-          props.packageManager,
-          props.openAiKey,
-          props.vectorDb,
-          props.dataSource,
-          props.llamaCloudKey,
-        );
-      }
-    }
-  } else {
-    // this is a frontend for a full-stack app, create .env file with model information
-    await createFrontendEnvFile(props.root, {
-      model: props.model,
-      customApiPath: props.customApiPath,
-    });
-  }
-};
-
-export * from "./types";
diff --git a/packages/create-llama/helpers/install.ts b/packages/create-llama/helpers/install.ts
deleted file mode 100644
index 9f0f20356265081bbb062454b14d1c9059544900..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/install.ts
+++ /dev/null
@@ -1,50 +0,0 @@
-/* eslint-disable import/no-extraneous-dependencies */
-import spawn from "cross-spawn";
-import { yellow } from "picocolors";
-import type { PackageManager } from "./get-pkg-manager";
-
-/**
- * Spawn a package manager installation based on user preference.
- *
- * @returns A Promise that resolves once the installation is finished.
- */
-export async function callPackageManager(
-  /** Indicate which package manager to use. */
-  packageManager: PackageManager,
-  /** Indicate whether there is an active Internet connection.*/
-  isOnline: boolean,
-  args: string[] = ["install"],
-): Promise<void> {
-  if (!isOnline) {
-    console.log(
-      yellow("You appear to be offline.\nFalling back to the local cache."),
-    );
-    args.push("--offline");
-  }
-  /**
-   * Return a Promise that resolves once the installation is finished.
-   */
-  return new Promise((resolve, reject) => {
-    /**
-     * Spawn the installation process.
-     */
-    const child = spawn(packageManager, args, {
-      stdio: "inherit",
-      env: {
-        ...process.env,
-        ADBLOCK: "1",
-        // we set NODE_ENV to development because pnpm skips dev
-        // dependencies when NODE_ENV is production
-        NODE_ENV: "development",
-        DISABLE_OPENCOLLECTIVE: "1",
-      },
-    });
-    child.on("close", (code) => {
-      if (code !== 0) {
-        reject({ command: `${packageManager} ${args.join(" ")}` });
-        return;
-      }
-      resolve();
-    });
-  });
-}
diff --git a/packages/create-llama/helpers/is-folder-empty.ts b/packages/create-llama/helpers/is-folder-empty.ts
deleted file mode 100644
index 927a344c00fe80bc8c906349d70ffad5bb47d15e..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/is-folder-empty.ts
+++ /dev/null
@@ -1,62 +0,0 @@
-/* eslint-disable import/no-extraneous-dependencies */
-import fs from "fs";
-import path from "path";
-import { blue, green } from "picocolors";
-
-export function isFolderEmpty(root: string, name: string): boolean {
-  const validFiles = [
-    ".DS_Store",
-    ".git",
-    ".gitattributes",
-    ".gitignore",
-    ".gitlab-ci.yml",
-    ".hg",
-    ".hgcheck",
-    ".hgignore",
-    ".idea",
-    ".npmignore",
-    ".travis.yml",
-    "LICENSE",
-    "Thumbs.db",
-    "docs",
-    "mkdocs.yml",
-    "npm-debug.log",
-    "yarn-debug.log",
-    "yarn-error.log",
-    "yarnrc.yml",
-    ".yarn",
-  ];
-
-  const conflicts = fs
-    .readdirSync(root)
-    .filter((file) => !validFiles.includes(file))
-    // Support IntelliJ IDEA-based editors
-    .filter((file) => !/\.iml$/.test(file));
-
-  if (conflicts.length > 0) {
-    console.log(
-      `The directory ${green(name)} contains files that could conflict:`,
-    );
-    console.log();
-    for (const file of conflicts) {
-      try {
-        const stats = fs.lstatSync(path.join(root, file));
-        if (stats.isDirectory()) {
-          console.log(`  ${blue(file)}/`);
-        } else {
-          console.log(`  ${file}`);
-        }
-      } catch {
-        console.log(`  ${file}`);
-      }
-    }
-    console.log();
-    console.log(
-      "Either try using a new directory name, or remove the files listed above.",
-    );
-    console.log();
-    return false;
-  }
-
-  return true;
-}
diff --git a/packages/create-llama/helpers/is-online.ts b/packages/create-llama/helpers/is-online.ts
deleted file mode 100644
index eab6980053517b8c350f537eb34ef238826909a8..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/is-online.ts
+++ /dev/null
@@ -1,40 +0,0 @@
-import { execSync } from "child_process";
-import dns from "dns";
-import url from "url";
-
-function getProxy(): string | undefined {
-  if (process.env.https_proxy) {
-    return process.env.https_proxy;
-  }
-
-  try {
-    const httpsProxy = execSync("npm config get https-proxy").toString().trim();
-    return httpsProxy !== "null" ? httpsProxy : undefined;
-  } catch (e) {
-    return;
-  }
-}
-
-export function getOnline(): Promise<boolean> {
-  return new Promise((resolve) => {
-    dns.lookup("registry.yarnpkg.com", (registryErr) => {
-      if (!registryErr) {
-        return resolve(true);
-      }
-
-      const proxy = getProxy();
-      if (!proxy) {
-        return resolve(false);
-      }
-
-      const { hostname } = url.parse(proxy);
-      if (!hostname) {
-        return resolve(false);
-      }
-
-      dns.lookup(hostname, (proxyErr) => {
-        resolve(proxyErr == null);
-      });
-    });
-  });
-}
diff --git a/packages/create-llama/helpers/is-url.ts b/packages/create-llama/helpers/is-url.ts
deleted file mode 100644
index eb87b975252f721bf596f98b628f4b6394a63516..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/is-url.ts
+++ /dev/null
@@ -1,8 +0,0 @@
-export function isUrl(url: string): boolean {
-  try {
-    new URL(url);
-    return true;
-  } catch (error) {
-    return false;
-  }
-}
diff --git a/packages/create-llama/helpers/is-writeable.ts b/packages/create-llama/helpers/is-writeable.ts
deleted file mode 100644
index fa29d60558b33c0cd9823ec29842d48648b37901..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/is-writeable.ts
+++ /dev/null
@@ -1,10 +0,0 @@
-import fs from "fs";
-
-export async function isWriteable(directory: string): Promise<boolean> {
-  try {
-    await fs.promises.access(directory, (fs.constants || fs).W_OK);
-    return true;
-  } catch (err) {
-    return false;
-  }
-}
diff --git a/packages/create-llama/helpers/llama-pack.ts b/packages/create-llama/helpers/llama-pack.ts
deleted file mode 100644
index 887201d9fcdd2a0ddcdac8199cbc655a186924df..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/llama-pack.ts
+++ /dev/null
@@ -1,148 +0,0 @@
-import fs from "fs/promises";
-import got from "got";
-import path from "path";
-import { parse } from "smol-toml";
-import {
-  LLAMA_PACK_FOLDER,
-  LLAMA_PACK_FOLDER_PATH,
-  LLAMA_PACK_OWNER,
-  LLAMA_PACK_REPO,
-} from "./constant";
-import { copy } from "./copy";
-import { templatesDir } from "./dir";
-import { addDependencies, installPythonDependencies } from "./python";
-import { getRepoRawContent } from "./repo";
-import { InstallTemplateArgs } from "./types";
-
-const getLlamaPackFolderSHA = async () => {
-  const url = `https://api.github.com/repos/${LLAMA_PACK_OWNER}/${LLAMA_PACK_REPO}/contents`;
-  const response = await got(url, {
-    responseType: "json",
-  });
-  const data = response.body as any[];
-  const llamaPackFolder = data.find((item) => item.name === LLAMA_PACK_FOLDER);
-  return llamaPackFolder.sha;
-};
-
-const getLLamaPackFolderTree = async (
-  sha: string,
-): Promise<
-  Array<{
-    path: string;
-  }>
-> => {
-  const url = `https://api.github.com/repos/${LLAMA_PACK_OWNER}/${LLAMA_PACK_REPO}/git/trees/${sha}?recursive=1`;
-  const response = await got(url, {
-    responseType: "json",
-  });
-  return (response.body as any).tree;
-};
-
-export async function getAvailableLlamapackOptions(): Promise<
-  {
-    name: string;
-    folderPath: string;
-  }[]
-> {
-  const EXAMPLE_RELATIVE_PATH = "/examples/example.py";
-  const PACK_FOLDER_PREFIX = "llama-index-packs";
-
-  const llamaPackFolderSHA = await getLlamaPackFolderSHA();
-  const llamaPackTree = await getLLamaPackFolderTree(llamaPackFolderSHA);
-
-  // Return options that have example files
-  const exampleFiles = llamaPackTree.filter((item) =>
-    item.path.endsWith(EXAMPLE_RELATIVE_PATH),
-  );
-  const options = exampleFiles.map((file) => {
-    const packFolder = file.path.substring(
-      0,
-      file.path.indexOf(EXAMPLE_RELATIVE_PATH),
-    );
-    const packName = packFolder.substring(PACK_FOLDER_PREFIX.length + 1);
-    return {
-      name: packName,
-      folderPath: packFolder,
-    };
-  });
-  return options;
-}
-
-const copyLlamapackEmptyProject = async ({
-  root,
-}: Pick<InstallTemplateArgs, "root">) => {
-  const templatePath = path.join(
-    templatesDir,
-    "components/sample-projects/llamapack",
-  );
-  await copy("**", root, {
-    parents: true,
-    cwd: templatePath,
-  });
-};
-
-const copyData = async ({
-  root,
-}: Pick<InstallTemplateArgs, "root" | "llamapack">) => {
-  const dataPath = path.join(templatesDir, "components/data");
-  await copy("**", path.join(root, "data"), {
-    parents: true,
-    cwd: dataPath,
-  });
-};
-
-const installLlamapackExample = async ({
-  root,
-  llamapack,
-}: Pick<InstallTemplateArgs, "root" | "llamapack">) => {
-  const exampleFileName = "example.py";
-  const readmeFileName = "README.md";
-  const projectTomlFileName = "pyproject.toml";
-  const exampleFilePath = `${LLAMA_PACK_FOLDER_PATH}/${llamapack}/examples/${exampleFileName}`;
-  const readmeFilePath = `${LLAMA_PACK_FOLDER_PATH}/${llamapack}/${readmeFileName}`;
-  const projectTomlFilePath = `${LLAMA_PACK_FOLDER_PATH}/${llamapack}/${projectTomlFileName}`;
-
-  // Download example.py from llamapack and save to root
-  const exampleContent = await getRepoRawContent(exampleFilePath);
-  await fs.writeFile(path.join(root, exampleFileName), exampleContent);
-
-  // Download README.md from llamapack and combine with README-template.md,
-  // save to root and then delete template file
-  const readmeContent = await getRepoRawContent(readmeFilePath);
-  const readmeTemplateContent = await fs.readFile(
-    path.join(root, "README-template.md"),
-    "utf-8",
-  );
-  await fs.writeFile(
-    path.join(root, readmeFileName),
-    `${readmeContent}\n${readmeTemplateContent}`,
-  );
-  await fs.unlink(path.join(root, "README-template.md"));
-
-  // Download pyproject.toml from llamapack, parse it to get package name and version,
-  // then add it as a dependency to current toml file in the project
-  const projectTomlContent = await getRepoRawContent(projectTomlFilePath);
-  const fileParsed = parse(projectTomlContent) as any;
-  const packageName = fileParsed.tool.poetry.name;
-  const packageVersion = fileParsed.tool.poetry.version;
-  await addDependencies(root, [
-    {
-      name: packageName,
-      version: packageVersion,
-    },
-  ]);
-};
-
-export const installLlamapackProject = async ({
-  root,
-  llamapack,
-  postInstallAction,
-}: Pick<InstallTemplateArgs, "root" | "llamapack" | "postInstallAction">) => {
-  console.log("\nInstalling Llamapack project:", llamapack!);
-  await copyLlamapackEmptyProject({ root });
-  await copyData({ root });
-  await installLlamapackExample({ root, llamapack });
-  if (postInstallAction === "runApp" || postInstallAction === "dependencies") {
-    installPythonDependencies({ noRoot: true });
-  }
-};
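
A sketch of the LlamaPack flow as questions.ts below drives it (the root path and pack choice are hypothetical):

import {
  getAvailableLlamapackOptions,
  installLlamapackProject,
} from "./helpers/llama-pack";

// List packs that ship an examples/example.py, then install the first one.
const packs = await getAvailableLlamapackOptions();
await installLlamapackProject({
  root: "/tmp/my-pack-app",
  llamapack: packs[0].folderPath,
  postInstallAction: "dependencies",
});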
diff --git a/packages/create-llama/helpers/make-dir.ts b/packages/create-llama/helpers/make-dir.ts
deleted file mode 100644
index 2c258fd6b5a0d2362c13cfcbade7cac21702fb5a..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/make-dir.ts
+++ /dev/null
@@ -1,8 +0,0 @@
-import fs from "fs";
-
-export function makeDir(
-  root: string,
-  options = { recursive: true },
-): Promise<string | undefined> {
-  return fs.promises.mkdir(root, options);
-}
diff --git a/packages/create-llama/helpers/poetry.ts b/packages/create-llama/helpers/poetry.ts
deleted file mode 100644
index fe8759d7cf62e0ed99ea6b03a5a44cab635ec3f9..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/poetry.ts
+++ /dev/null
@@ -1,36 +0,0 @@
-/* eslint-disable import/no-extraneous-dependencies */
-import { execSync } from "child_process";
-import fs from "fs";
-
-export function isPoetryAvailable(): boolean {
-  try {
-    execSync("poetry --version", { stdio: "ignore" });
-    return true;
-  } catch (_) {}
-  return false;
-}
-
-export function tryPoetryInstall(noRoot: boolean): boolean {
-  try {
-    execSync(`poetry install${noRoot ? " --no-root" : ""}`, {
-      stdio: "inherit",
-    });
-    return true;
-  } catch (_) {}
-  return false;
-}
-
-export function tryPoetryRun(command: string): boolean {
-  try {
-    execSync(`poetry run ${command}`, { stdio: "inherit" });
-    return true;
-  } catch (_) {}
-  return false;
-}
-
-export function isHavingPoetryLockFile(): boolean {
-  try {
-    return fs.existsSync("poetry.lock");
-  } catch (_) {}
-  return false;
-}
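
A sketch of the Poetry helpers combined the way python.ts below uses them (the uvicorn command is a hypothetical example):

import {
  isHavingPoetryLockFile,
  isPoetryAvailable,
  tryPoetryInstall,
  tryPoetryRun,
} from "./helpers/poetry";

if (isPoetryAvailable()) {
  // --no-root: install only the dependencies, not the project itself
  if (!tryPoetryInstall(true)) process.exit(1);
  if (isHavingPoetryLockFile()) {
    tryPoetryRun("uvicorn main:app --port 8000");
  }
}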
diff --git a/packages/create-llama/helpers/python.ts b/packages/create-llama/helpers/python.ts
deleted file mode 100644
index 285beb9a8b377b1c757c84765fca5e5766bba9f4..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/python.ts
+++ /dev/null
@@ -1,272 +0,0 @@
-import fs from "fs/promises";
-import path from "path";
-import { cyan, red } from "picocolors";
-import { parse, stringify } from "smol-toml";
-import terminalLink from "terminal-link";
-import { copy } from "./copy";
-import { templatesDir } from "./dir";
-import { isPoetryAvailable, tryPoetryInstall } from "./poetry";
-import { Tool } from "./tools";
-import {
-  FileSourceConfig,
-  InstallTemplateArgs,
-  TemplateDataSource,
-  TemplateVectorDB,
-} from "./types";
-
-interface Dependency {
-  name: string;
-  version?: string;
-  extras?: string[];
-}
-
-const getAdditionalDependencies = (
-  vectorDb?: TemplateVectorDB,
-  dataSource?: TemplateDataSource,
-  tools?: Tool[],
-) => {
-  const dependencies: Dependency[] = [];
-
-  // Add vector db dependencies
-  switch (vectorDb) {
-    case "mongo": {
-      dependencies.push({
-        name: "llama-index-vector-stores-mongodb",
-        version: "^0.1.3",
-      });
-      break;
-    }
-    case "pg": {
-      dependencies.push({
-        name: "llama-index-vector-stores-postgres",
-        version: "^0.1.1",
-      });
-    }
-    case "pinecone": {
-      dependencies.push({
-        name: "llama-index-vector-stores-pinecone",
-        version: "^0.1.3",
-      });
-      break;
-    }
-  }
-
-  // Add data source dependencies
-  const dataSourceType = dataSource?.type;
-  if (dataSourceType === "file" || dataSourceType === "folder") {
-    // llama-index-readers-file (pdf, excel, csv) is already included in llama_index package
-    dependencies.push({
-      name: "docx2txt",
-      version: "^0.8",
-    });
-  } else if (dataSourceType === "web") {
-    dependencies.push({
-      name: "llama-index-readers-web",
-      version: "^0.1.6",
-    });
-  }
-
-  // Add tools dependencies
-  tools?.forEach((tool) => {
-    tool.dependencies?.forEach((dep) => {
-      dependencies.push(dep);
-    });
-  });
-
-  return dependencies;
-};
-
-const mergePoetryDependencies = (
-  dependencies: Dependency[],
-  existingDependencies: Record<string, Omit<Dependency, "name">>,
-) => {
-  for (const dependency of dependencies) {
-    let value = existingDependencies[dependency.name] ?? {};
-
-    // default string value is equal to attribute "version"
-    if (typeof value === "string") {
-      value = { version: value };
-    }
-
-    value.version = dependency.version ?? value.version;
-    value.extras = dependency.extras ?? value.extras;
-
-    if (value.version === undefined) {
-      throw new Error(
-        `Dependency "${dependency.name}" is missing attribute "version"!`,
-      );
-    }
-
-    existingDependencies[dependency.name] = value;
-  }
-};
-
-export const addDependencies = async (
-  projectDir: string,
-  dependencies: Dependency[],
-) => {
-  if (dependencies.length === 0) return;
-
-  const FILENAME = "pyproject.toml";
-  try {
-    // Parse toml file
-    const file = path.join(projectDir, FILENAME);
-    const fileContent = await fs.readFile(file, "utf8");
-    const fileParsed = parse(fileContent);
-
-    // Modify toml dependencies
-    const tool = fileParsed.tool as any;
-    const existingDependencies = tool.poetry.dependencies;
-    mergePoetryDependencies(dependencies, existingDependencies);
-
-    // Write toml file
-    const newFileContent = stringify(fileParsed);
-    await fs.writeFile(file, newFileContent);
-
-    const dependenciesString = dependencies.map((d) => d.name).join(", ");
-    console.log(`\nAdded ${dependenciesString} to ${cyan(FILENAME)}\n`);
-  } catch (error) {
-    console.log(
-      `Error while updating dependencies for Poetry project file ${FILENAME}\n`,
-      error,
-    );
-  }
-};
-
-export const installPythonDependencies = (
-  { noRoot }: { noRoot: boolean } = { noRoot: false },
-) => {
-  if (isPoetryAvailable()) {
-    console.log(
-      `Installing python dependencies using poetry. This may take a while...`,
-    );
-    const installSuccessful = tryPoetryInstall(noRoot);
-    if (!installSuccessful) {
-      console.error(
-        red(
-          "Installing dependencies using poetry failed. Please check error log above and try running create-llama again.",
-        ),
-      );
-      process.exit(1);
-    }
-  } else {
-    console.error(
-      red(
-        `Poetry is not available in the current environment. Please check ${terminalLink(
-          "Poetry Installation",
-          `https://python-poetry.org/docs/#installation`,
-        )} to install poetry first, then run create-llama again.`,
-      ),
-    );
-    process.exit(1);
-  }
-};
-
-export const installPythonTemplate = async ({
-  root,
-  template,
-  framework,
-  engine,
-  vectorDb,
-  dataSource,
-  tools,
-  postInstallAction,
-}: Pick<
-  InstallTemplateArgs,
-  | "root"
-  | "framework"
-  | "template"
-  | "engine"
-  | "vectorDb"
-  | "dataSource"
-  | "tools"
-  | "postInstallAction"
->) => {
-  console.log("\nInitializing Python project with template:", template, "\n");
-  const templatePath = path.join(templatesDir, "types", template, framework);
-  await copy("**", root, {
-    parents: true,
-    cwd: templatePath,
-    rename(name) {
-      switch (name) {
-        case "gitignore": {
-          return `.${name}`;
-        }
-        // README.md is ignored by webpack-asset-relocator-loader used by ncc:
-        // https://github.com/vercel/webpack-asset-relocator-loader/blob/e9308683d47ff507253e37c9bcbb99474603192b/src/asset-relocator.js#L227
-        case "README-template.md": {
-          return "README.md";
-        }
-        default: {
-          return name;
-        }
-      }
-    },
-  });
-
-  if (engine === "context") {
-    const enginePath = path.join(root, "app", "engine");
-    const compPath = path.join(templatesDir, "components");
-
-    const vectorDbDirName = vectorDb ?? "none";
-    const VectorDBPath = path.join(
-      compPath,
-      "vectordbs",
-      "python",
-      vectorDbDirName,
-    );
-    await copy("**", enginePath, {
-      parents: true,
-      cwd: VectorDBPath,
-    });
-
-    // Copy engine code
-    if (tools !== undefined && tools.length > 0) {
-      await copy("**", enginePath, {
-        parents: true,
-        cwd: path.join(compPath, "engines", "python", "agent"),
-      });
-      // Write tools_config.json
-      const configContent: Record<string, any> = {};
-      tools.forEach((tool) => {
-        configContent[tool.name] = tool.config ?? {};
-      });
-      const configFilePath = path.join(root, "tools_config.json");
-      await fs.writeFile(
-        configFilePath,
-        JSON.stringify(configContent, null, 2),
-      );
-    } else {
-      await copy("**", enginePath, {
-        parents: true,
-        cwd: path.join(compPath, "engines", "python", "chat"),
-      });
-    }
-
-    const dataSourceType = dataSource?.type;
-    if (dataSourceType !== undefined && dataSourceType !== "none") {
-      let loaderFolder: string;
-      if (dataSourceType === "file" || dataSourceType === "folder") {
-        const dataSourceConfig = dataSource?.config as FileSourceConfig;
-        loaderFolder = dataSourceConfig.useLlamaParse ? "llama_parse" : "file";
-      } else {
-        loaderFolder = dataSourceType;
-      }
-      await copy("**", enginePath, {
-        parents: true,
-        cwd: path.join(compPath, "loaders", "python", loaderFolder),
-      });
-    }
-  }
-
-  const addOnDependencies = getAdditionalDependencies(
-    vectorDb,
-    dataSource,
-    tools,
-  );
-  await addDependencies(root, addOnDependencies);
-
-  if (postInstallAction === "runApp" || postInstallAction === "dependencies") {
-    installPythonDependencies();
-  }
-};
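
A sketch of addDependencies in isolation (the project path is hypothetical; the dependency is one of the pinned versions listed above):

import { addDependencies } from "./helpers/python";

// Merges the entry into [tool.poetry.dependencies] of pyproject.toml,
// preserving any extras already declared there.
await addDependencies("/tmp/my-app", [
  { name: "llama-index-vector-stores-mongodb", version: "^0.1.3" },
]);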
diff --git a/packages/create-llama/helpers/repo.ts b/packages/create-llama/helpers/repo.ts
deleted file mode 100644
index 3942c28cd08ae5a5d42f7db5a97d170434b041ed..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/repo.ts
+++ /dev/null
@@ -1,71 +0,0 @@
-import { createWriteStream, promises } from "fs";
-import got from "got";
-import { tmpdir } from "os";
-import { join } from "path";
-import { Stream } from "stream";
-import tar from "tar";
-import { promisify } from "util";
-import { makeDir } from "./make-dir";
-
-export type RepoInfo = {
-  username: string;
-  name: string;
-  branch: string;
-  filePath: string;
-};
-
-const pipeline = promisify(Stream.pipeline);
-
-async function downloadTar(url: string) {
-  const tempFile = join(tmpdir(), `next.js-cna-example.temp-${Date.now()}`);
-  await pipeline(got.stream(url), createWriteStream(tempFile));
-  return tempFile;
-}
-
-export async function downloadAndExtractRepo(
-  root: string,
-  { username, name, branch, filePath }: RepoInfo,
-) {
-  await makeDir(root);
-
-  const tempFile = await downloadTar(
-    `https://codeload.github.com/${username}/${name}/tar.gz/${branch}`,
-  );
-
-  await tar.x({
-    file: tempFile,
-    cwd: root,
-    strip: filePath ? filePath.split("/").length + 1 : 1,
-    filter: (p) =>
-      p.startsWith(
-        `${name}-${branch.replace(/\//g, "-")}${
-          filePath ? `/${filePath}/` : "/"
-        }`,
-      ),
-  });
-
-  await promises.unlink(tempFile);
-}
-
-export async function getRepoRootFolders(
-  owner: string,
-  repo: string,
-): Promise<string[]> {
-  const url = `https://api.github.com/repos/${owner}/${repo}/contents`;
-
-  const response = await got(url, {
-    responseType: "json",
-  });
-
-  const data = response.body as any[];
-  const folders = data.filter((item) => item.type === "dir");
-  return folders.map((item) => item.name);
-}
-
-export async function getRepoRawContent(repoFilePath: string) {
-  const url = `https://raw.githubusercontent.com/${repoFilePath}`;
-  const response = await got(url, {
-    responseType: "text",
-  });
-  return response.body;
-}
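
A sketch of downloadAndExtractRepo (owner, repo, and folder are hypothetical values; in practice they come from the COMMUNITY_* constants):

import { downloadAndExtractRepo } from "./helpers/repo";

// Downloads the branch tarball from codeload.github.com and extracts only
// the entries under the requested folder, stripping the leading path.
await downloadAndExtractRepo("/tmp/community-app", {
  username: "run-llama",
  name: "create_llama_projects",
  branch: "main",
  filePath: "nextjs-multi-modal",
});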
diff --git a/packages/create-llama/helpers/run-app.ts b/packages/create-llama/helpers/run-app.ts
deleted file mode 100644
index 616787a7398af4fed2bada689a07d0944f01cdbd..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/run-app.ts
+++ /dev/null
@@ -1,88 +0,0 @@
-import { ChildProcess, SpawnOptions, spawn } from "child_process";
-import path from "path";
-import { TemplateFramework } from "./types";
-
-const createProcess = (
-  command: string,
-  args: string[],
-  options: SpawnOptions,
-) => {
-  return spawn(command, args, {
-    ...options,
-    shell: true,
-  })
-    .on("exit", function (code) {
-      if (code !== 0) {
-        console.log(`Child process exited with code=${code}`);
-        process.exit(1);
-      }
-    })
-    .on("error", function (err) {
-      console.log("Error when running chill process: ", err);
-      process.exit(1);
-    });
-};
-
-// eslint-disable-next-line max-params
-export async function runApp(
-  appPath: string,
-  frontend: boolean,
-  framework: TemplateFramework,
-  port?: number,
-  externalPort?: number,
-): Promise<any> {
-  let backendAppProcess: ChildProcess;
-  let frontendAppProcess: ChildProcess | undefined;
-  const frontendPort = port || 3000;
-  let backendPort = externalPort || 8000;
-
-  // Callback to kill app processes
-  process.on("exit", () => {
-    console.log("Killing app processes...");
-    backendAppProcess.kill();
-    frontendAppProcess?.kill();
-  });
-
-  let backendCommand = "";
-  let backendArgs: string[];
-  if (framework === "fastapi") {
-    backendCommand = "poetry";
-    backendArgs = [
-      "run",
-      "uvicorn",
-      "main:app",
-      "--host=0.0.0.0",
-      "--port=" + backendPort,
-    ];
-  } else if (framework === "nextjs") {
-    backendCommand = "npm";
-    backendArgs = ["run", "dev"];
-    backendPort = frontendPort;
-  } else {
-    backendCommand = "npm";
-    backendArgs = ["run", "dev"];
-  }
-
-  if (frontend) {
-    // Note: this promise intentionally never settles - the spawned
-    // processes keep running until create-llama itself exits.
-    return new Promise(() => {
-      backendAppProcess = createProcess(backendCommand, backendArgs, {
-        stdio: "inherit",
-        cwd: path.join(appPath, "backend"),
-        env: { ...process.env, PORT: `${backendPort}` },
-      });
-      frontendAppProcess = createProcess("npm", ["run", "dev"], {
-        stdio: "inherit",
-        cwd: path.join(appPath, "frontend"),
-        env: { ...process.env, PORT: `${frontendPort}` },
-      });
-    });
-  } else {
-    return new Promise(() => {
-      backendAppProcess = createProcess(backendCommand, backendArgs, {
-        stdio: "inherit",
-        cwd: path.join(appPath),
-        env: { ...process.env, PORT: `${backendPort}` },
-      });
-    });
-  }
-}
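
A sketch of runApp as index.ts below invokes it (paths and ports are hypothetical):

import { runApp } from "./helpers/run-app";

// FastAPI backend on port 8000 plus the generated NextJS frontend on 3000;
// with frontend=true the app is expected to live in backend/ and frontend/.
await runApp("/tmp/my-app", true, "fastapi", 3000, 8000);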
diff --git a/packages/create-llama/helpers/tools.ts b/packages/create-llama/helpers/tools.ts
deleted file mode 100644
index 49559253776d3fef308b130e14a067fd6481db6a..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/tools.ts
+++ /dev/null
@@ -1,71 +0,0 @@
-import { red } from "picocolors";
-
-export type Tool = {
-  display: string;
-  name: string;
-  config?: Record<string, any>;
-  dependencies?: ToolDependencies[];
-};
-export type ToolDependencies = {
-  name: string;
-  version?: string;
-};
-
-export const supportedTools: Tool[] = [
-  {
-    display: "Google Search (configuration required after installation)",
-    name: "google.GoogleSearchToolSpec",
-    config: {
-      engine:
-        "Your search engine id, see https://developers.google.com/custom-search/v1/overview#prerequisites",
-      key: "Your search api key",
-      num: 2,
-    },
-    dependencies: [
-      {
-        name: "llama-index-tools-google",
-        version: "0.1.2",
-      },
-    ],
-  },
-  {
-    display: "Wikipedia",
-    name: "wikipedia.WikipediaToolSpec",
-    dependencies: [
-      {
-        name: "llama-index-tools-wikipedia",
-        version: "0.1.2",
-      },
-    ],
-  },
-];
-
-export const getTool = (toolName: string): Tool | undefined => {
-  return supportedTools.find((tool) => tool.name === toolName);
-};
-
-export const getTools = (toolsName: string[]): Tool[] => {
-  const tools: Tool[] = [];
-  for (const toolName of toolsName) {
-    const tool = getTool(toolName);
-    if (!tool) {
-      console.log(
-        red(
-          `Error: Tool '${toolName}' is not supported. Supported tools are: ${supportedTools
-            .map((t) => t.name)
-            .join(", ")}`,
-        ),
-      );
-      process.exit(1);
-    }
-    tools.push(tool);
-  }
-  return tools;
-};
-
-export const toolsRequireConfig = (tools?: Tool[]): boolean => {
-  if (tools) {
-    return tools?.some((tool) => Object.keys(tool.config || {}).length > 0);
-  }
-  return false;
-};
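
A sketch of resolving the --tools CLI value (the tool name is one of the supported tools declared above):

import { getTools, toolsRequireConfig } from "./helpers/tools";

const tools = getTools(["wikipedia.WikipediaToolSpec"]);
// Wikipedia declares no config block, so the app can run immediately.
console.log(toolsRequireConfig(tools)); // false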
diff --git a/packages/create-llama/helpers/types.ts b/packages/create-llama/helpers/types.ts
deleted file mode 100644
index 76be9af317babc463c508aec831e3df415df930f..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/types.ts
+++ /dev/null
@@ -1,52 +0,0 @@
-import { PackageManager } from "../helpers/get-pkg-manager";
-import { Tool } from "./tools";
-
-export type TemplateType = "simple" | "streaming" | "community" | "llamapack";
-export type TemplateFramework = "nextjs" | "express" | "fastapi";
-export type TemplateEngine = "simple" | "context";
-export type TemplateUI = "html" | "shadcn";
-export type TemplateVectorDB = "none" | "mongo" | "pg" | "pinecone";
-export type TemplatePostInstallAction =
-  | "none"
-  | "VSCode"
-  | "dependencies"
-  | "runApp";
-export type TemplateDataSource = {
-  type: TemplateDataSourceType;
-  config: TemplateDataSourceConfig;
-};
-export type TemplateDataSourceType = "none" | "file" | "folder" | "web";
-// Config for both file and folder
-export type FileSourceConfig = {
-  path?: string;
-  useLlamaParse?: boolean;
-};
-export type WebSourceConfig = {
-  baseUrl?: string;
-  depth?: number;
-};
-export type TemplateDataSourceConfig = FileSourceConfig | WebSourceConfig;
-
-export interface InstallTemplateArgs {
-  appName: string;
-  root: string;
-  packageManager: PackageManager;
-  isOnline: boolean;
-  template: TemplateType;
-  framework: TemplateFramework;
-  engine: TemplateEngine;
-  ui: TemplateUI;
-  dataSource?: TemplateDataSource;
-  eslint: boolean;
-  customApiPath?: string;
-  openAiKey?: string;
-  llamaCloudKey?: string;
-  model: string;
-  embeddingModel: string;
-  communityProjectPath?: string;
-  llamapack?: string;
-  vectorDb?: TemplateVectorDB;
-  externalPort?: number;
-  postInstallAction?: TemplatePostInstallAction;
-  tools?: Tool[];
-}
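
For orientation, a sketch of the argument shape the template installers receive (all values hypothetical; only the required fields are shown):

import { InstallTemplateArgs } from "./helpers/types";

const args: InstallTemplateArgs = {
  appName: "my-app",
  root: "/tmp/my-app",
  packageManager: "pnpm",
  isOnline: true,
  template: "streaming",
  framework: "nextjs",
  engine: "context",
  ui: "shadcn",
  eslint: true,
  model: "gpt-3.5-turbo",
  embeddingModel: "text-embedding-ada-002",
};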
diff --git a/packages/create-llama/helpers/typescript.ts b/packages/create-llama/helpers/typescript.ts
deleted file mode 100644
index e607f070bd3cde4ef9f787f990d9974e3c6c163f..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/typescript.ts
+++ /dev/null
@@ -1,227 +0,0 @@
-import fs from "fs/promises";
-import os from "os";
-import path from "path";
-import { bold, cyan } from "picocolors";
-import { version } from "../../core/package.json";
-import { copy } from "../helpers/copy";
-import { callPackageManager } from "../helpers/install";
-import { templatesDir } from "./dir";
-import { PackageManager } from "./get-pkg-manager";
-import { InstallTemplateArgs } from "./types";
-
-const rename = (name: string) => {
-  switch (name) {
-    case "gitignore":
-    case "eslintrc.json": {
-      return `.${name}`;
-    }
-    // README.md is ignored by webpack-asset-relocator-loader used by ncc:
-    // https://github.com/vercel/webpack-asset-relocator-loader/blob/e9308683d47ff507253e37c9bcbb99474603192b/src/asset-relocator.js#L227
-    case "README-template.md": {
-      return "README.md";
-    }
-    default: {
-      return name;
-    }
-  }
-};
-
-export const installTSDependencies = async (
-  packageJson: any,
-  packageManager: PackageManager,
-  isOnline: boolean,
-): Promise<void> => {
-  console.log("\nInstalling dependencies:");
-  for (const dependency in packageJson.dependencies)
-    console.log(`- ${cyan(dependency)}`);
-
-  console.log("\nInstalling devDependencies:");
-  for (const dependency in packageJson.devDependencies)
-    console.log(`- ${cyan(dependency)}`);
-
-  console.log();
-
-  await callPackageManager(packageManager, isOnline).catch((error) => {
-    console.error("Failed to install TS dependencies. Exiting...");
-    process.exit(1);
-  });
-};
-
-/**
- * Install a LlamaIndex internal template to a given `root` directory.
- */
-export const installTSTemplate = async ({
-  appName,
-  root,
-  packageManager,
-  isOnline,
-  template,
-  framework,
-  engine,
-  ui,
-  eslint,
-  customApiPath,
-  vectorDb,
-  postInstallAction,
-  backend,
-}: InstallTemplateArgs & { backend: boolean }) => {
-  console.log(bold(`Using ${packageManager}.`));
-
-  /**
-   * Copy the template files to the target directory.
-   */
-  console.log("\nInitializing project with template:", template, "\n");
-  const templatePath = path.join(templatesDir, "types", template, framework);
-  const copySource = ["**"];
-  if (!eslint) copySource.push("!eslintrc.json");
-
-  await copy(copySource, root, {
-    parents: true,
-    cwd: templatePath,
-    rename,
-  });
-
-  /**
-   * If next.js is not used as a backend, update next.config.json to use static site generation.
-   */
-  if (framework === "nextjs" && !backend) {
-    // update next.config.json for static site generation
-    const nextConfigJsonFile = path.join(root, "next.config.json");
-    const nextConfigJson: any = JSON.parse(
-      await fs.readFile(nextConfigJsonFile, "utf8"),
-    );
-    nextConfigJson.output = "export";
-    nextConfigJson.images = { unoptimized: true };
-    await fs.writeFile(
-      nextConfigJsonFile,
-      JSON.stringify(nextConfigJson, null, 2) + os.EOL,
-    );
-  }
-
-  /**
-   * Copy the selected chat engine files to the target directory and reference it.
-   */
-  let relativeEngineDestPath;
-  const compPath = path.join(templatesDir, "components");
-  if (engine && (framework === "express" || framework === "nextjs")) {
-    console.log("\nUsing chat engine:", engine, "\n");
-
-    let vectorDBFolder: string = engine;
-
-    if (engine !== "simple" && vectorDb) {
-      console.log("\nUsing vector DB:", vectorDb, "\n");
-      vectorDBFolder = vectorDb;
-    }
-
-    const VectorDBPath = path.join(
-      compPath,
-      "vectordbs",
-      "typescript",
-      vectorDBFolder,
-    );
-    relativeEngineDestPath =
-      framework === "nextjs"
-        ? path.join("app", "api", "chat")
-        : path.join("src", "controllers");
-    await copy("**", path.join(root, relativeEngineDestPath, "engine"), {
-      parents: true,
-      cwd: VectorDBPath,
-    });
-  }
-
-  /**
-   * Copy the selected UI files to the target directory and reference it.
-   */
-  if (framework === "nextjs" && ui !== "shadcn") {
-    console.log("\nUsing UI:", ui, "\n");
-    const uiPath = path.join(compPath, "ui", ui);
-    const destUiPath = path.join(root, "app", "components", "ui");
-    // remove the default ui folder
-    await fs.rm(destUiPath, { recursive: true });
-    // copy the selected ui folder
-    await copy("**", destUiPath, {
-      parents: true,
-      cwd: uiPath,
-      rename,
-    });
-  }
-
-  /**
-   * Update the package.json scripts.
-   */
-  const packageJsonFile = path.join(root, "package.json");
-  const packageJson: any = JSON.parse(
-    await fs.readFile(packageJsonFile, "utf8"),
-  );
-  packageJson.name = appName;
-  packageJson.version = "0.1.0";
-
-  packageJson.dependencies = {
-    ...packageJson.dependencies,
-    llamaindex: version,
-  };
-
-  if (framework === "nextjs" && customApiPath) {
-    console.log(
-      "\nUsing external API with custom API path:",
-      customApiPath,
-      "\n",
-    );
-    // remove the default api folder
-    const apiPath = path.join(root, "app", "api");
-    await fs.rm(apiPath, { recursive: true });
-    // modify the dev script to use the custom api path
-  }
-
-  if (engine === "context" && relativeEngineDestPath) {
-    // add generate script if using context engine
-    packageJson.scripts = {
-      ...packageJson.scripts,
-      generate: `node ${path.join(
-        relativeEngineDestPath,
-        "engine",
-        "generate.mjs",
-      )}`,
-    };
-  }
-
-  if (framework === "nextjs" && ui === "html") {
-    // remove shadcn dependencies if html ui is selected
-    packageJson.dependencies = {
-      ...packageJson.dependencies,
-      "tailwind-merge": undefined,
-      "@radix-ui/react-slot": undefined,
-      "class-variance-authority": undefined,
-      clsx: undefined,
-      "lucide-react": undefined,
-      remark: undefined,
-      "remark-code-import": undefined,
-      "remark-gfm": undefined,
-      "remark-math": undefined,
-      "react-markdown": undefined,
-      "react-syntax-highlighter": undefined,
-    };
-
-    packageJson.devDependencies = {
-      ...packageJson.devDependencies,
-      "@types/react-syntax-highlighter": undefined,
-    };
-  }
-
-  if (!eslint) {
-    // Remove packages starting with "eslint" from devDependencies
-    packageJson.devDependencies = Object.fromEntries(
-      Object.entries(packageJson.devDependencies).filter(
-        ([key]) => !key.startsWith("eslint"),
-      ),
-    );
-  }
-  await fs.writeFile(
-    packageJsonFile,
-    JSON.stringify(packageJson, null, 2) + os.EOL,
-  );
-
-  if (postInstallAction === "runApp" || postInstallAction === "dependencies") {
-    await installTSDependencies(packageJson, packageManager, isOnline);
-  }
-};
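
A quick sketch of the rename hook's mapping (rename is module-private, so this assumes access within the module):

rename("gitignore");          // -> ".gitignore"
rename("eslintrc.json");      // -> ".eslintrc.json"
rename("README-template.md"); // -> "README.md"
rename("next.config.json");   // -> unchanged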
diff --git a/packages/create-llama/helpers/validate-pkg.ts b/packages/create-llama/helpers/validate-pkg.ts
deleted file mode 100644
index 68317653c8e455aec9ce6d192f725a74b8199cfd..0000000000000000000000000000000000000000
--- a/packages/create-llama/helpers/validate-pkg.ts
+++ /dev/null
@@ -1,20 +0,0 @@
-// eslint-disable-next-line import/no-extraneous-dependencies
-import validateProjectName from "validate-npm-package-name";
-
-export function validateNpmName(name: string): {
-  valid: boolean;
-  problems?: string[];
-} {
-  const nameValidation = validateProjectName(name);
-  if (nameValidation.validForNewPackages) {
-    return { valid: true };
-  }
-
-  return {
-    valid: false,
-    problems: [
-      ...(nameValidation.errors || []),
-      ...(nameValidation.warnings || []),
-    ],
-  };
-}
diff --git a/packages/create-llama/index.ts b/packages/create-llama/index.ts
deleted file mode 100644
index 131d6eba2bc0d89b39438cf929ff73ff0614afc8..0000000000000000000000000000000000000000
--- a/packages/create-llama/index.ts
+++ /dev/null
@@ -1,383 +0,0 @@
-#!/usr/bin/env node
-/* eslint-disable import/no-extraneous-dependencies */
-import { execSync } from "child_process";
-import Commander from "commander";
-import Conf from "conf";
-import fs from "fs";
-import path from "path";
-import { bold, cyan, green, red, yellow } from "picocolors";
-import prompts from "prompts";
-import terminalLink from "terminal-link";
-import checkForUpdate from "update-check";
-import { createApp } from "./create-app";
-import { getPkgManager } from "./helpers/get-pkg-manager";
-import { isFolderEmpty } from "./helpers/is-folder-empty";
-import { runApp } from "./helpers/run-app";
-import { getTools } from "./helpers/tools";
-import { validateNpmName } from "./helpers/validate-pkg";
-import packageJson from "./package.json";
-import { QuestionArgs, askQuestions, onPromptState } from "./questions";
-
-let projectPath: string = "";
-
-const handleSigTerm = () => process.exit(0);
-
-process.on("SIGINT", handleSigTerm);
-process.on("SIGTERM", handleSigTerm);
-
-const program = new Commander.Command(packageJson.name)
-  .version(packageJson.version)
-  .arguments("<project-directory>")
-  .usage(`${green("<project-directory>")} [options]`)
-  .action((name) => {
-    projectPath = name;
-  })
-  .option(
-    "--eslint",
-    `
-
-  Initialize with eslint config.
-`,
-  )
-  .option(
-    "--use-npm",
-    `
-
-  Explicitly tell the CLI to bootstrap the application using npm
-`,
-  )
-  .option(
-    "--use-pnpm",
-    `
-
-  Explicitly tell the CLI to bootstrap the application using pnpm
-`,
-  )
-  .option(
-    "--use-yarn",
-    `
-
-  Explicitly tell the CLI to bootstrap the application using Yarn
-`,
-  )
-  .option(
-    "--reset-preferences",
-    `
-
-  Explicitly tell the CLI to reset any stored preferences
-`,
-  )
-  .option(
-    "--template <template>",
-    `
-
-  Select a template to bootstrap the application with.
-`,
-  )
-  .option(
-    "--engine <engine>",
-    `
-
-  Select a chat engine to bootstrap the application with.
-`,
-  )
-  .option(
-    "--framework <framework>",
-    `
-
-  Select a framework to bootstrap the application with.
-`,
-  )
-  .option(
-    "--files <path>",
-    `
-
-  Specify the path to a local file or folder for chatting.
-`,
-  )
-  .option(
-    "--open-ai-key <key>",
-    `
-
-  Provide an OpenAI API key.
-`,
-  )
-  .option(
-    "--ui <ui>",
-    `
-
-  Select a UI to bootstrap the application with.
-`,
-  )
-  .option(
-    "--frontend",
-    `
-
-  Whether to generate a frontend for your backend.
-`,
-  )
-  .option(
-    "--model <model>",
-    `
-
-  Select OpenAI model to use. E.g. gpt-3.5-turbo.
-`,
-  )
-  .option(
-    "--embedding-model <embeddingModel>",
-    `
-  Select OpenAI embedding model to use. E.g. text-embedding-ada-002.
-`,
-  )
-  .option(
-    "--port <port>",
-    `
-
-  Select UI port.
-`,
-  )
-  .option(
-    "--external-port <external>",
-    `
-
-  Select external port.
-`,
-  )
-  .option(
-    "--post-install-action <action>",
-    `
-
-  Choose an action after installation. For example, 'runApp' or 'dependencies'. The default option is just to generate the app.
-`,
-  )
-  .option(
-    "--vector-db <vectorDb>",
-    `
-
-  Select which vector database you would like to use, such as 'none', 'pg' or 'mongo'. The default option is not to use a vector database and use the local filesystem instead ('none').
-`,
-  )
-  .option(
-    "--tools <tools>",
-    `
-
-  Specify the tools you want to use by providing a comma-separated list. For example, 'wikipedia.WikipediaToolSpec,google.GoogleSearchToolSpec'. Use 'none' to not use any tools.
-`,
-  )
-  .option(
-    "--llama-parse",
-    `
-    Enable LlamaParse.
-`,
-  )
-  .option(
-    "--llama-cloud-key <key>",
-    `
-  Provide a LlamaCloud API key.
-`,
-  )
-  .allowUnknownOption()
-  .parse(process.argv);
-if (process.argv.includes("--no-frontend")) {
-  program.frontend = false;
-}
-if (process.argv.includes("--no-eslint")) {
-  program.eslint = false;
-}
-if (process.argv.includes("--tools")) {
-  if (program.tools === "none") {
-    program.tools = [];
-  } else {
-    program.tools = getTools(program.tools.split(","));
-  }
-}
-if (process.argv.includes("--no-llama-parse")) {
-  program.llamaParse = false;
-}
-
-const packageManager = !!program.useNpm
-  ? "npm"
-  : !!program.usePnpm
-    ? "pnpm"
-    : !!program.useYarn
-      ? "yarn"
-      : getPkgManager();
-
-async function run(): Promise<void> {
-  const conf = new Conf({ projectName: "create-llama" });
-
-  if (program.resetPreferences) {
-    conf.clear();
-    console.log(`Preferences reset successfully`);
-    return;
-  }
-
-  if (typeof projectPath === "string") {
-    projectPath = projectPath.trim();
-  }
-
-  if (!projectPath) {
-    const res = await prompts({
-      onState: onPromptState,
-      type: "text",
-      name: "path",
-      message: "What is your project named?",
-      initial: "my-app",
-      validate: (name) => {
-        const validation = validateNpmName(path.basename(path.resolve(name)));
-        if (validation.valid) {
-          return true;
-        }
-        return "Invalid project name: " + validation.problems![0];
-      },
-    });
-
-    if (typeof res.path === "string") {
-      projectPath = res.path.trim();
-    }
-  }
-
-  if (!projectPath) {
-    console.log(
-      "\nPlease specify the project directory:\n" +
-        `  ${cyan(program.name())} ${green("<project-directory>")}\n` +
-        "For example:\n" +
-        `  ${cyan(program.name())} ${green("my-app")}\n\n` +
-        `Run ${cyan(`${program.name()} --help`)} to see all options.`,
-    );
-    process.exit(1);
-  }
-
-  const resolvedProjectPath = path.resolve(projectPath);
-  const projectName = path.basename(resolvedProjectPath);
-
-  const { valid, problems } = validateNpmName(projectName);
-  if (!valid) {
-    console.error(
-      `Could not create a project called ${red(
-        `"${projectName}"`,
-      )} because of npm naming restrictions:`,
-    );
-
-    problems!.forEach((p) => console.error(`    ${red(bold("*"))} ${p}`));
-    process.exit(1);
-  }
-
-  /**
-   * Verify the project dir is empty or doesn't exist
-   */
-  const root = path.resolve(resolvedProjectPath);
-  const appName = path.basename(root);
-  const folderExists = fs.existsSync(root);
-
-  if (folderExists && !isFolderEmpty(root, appName)) {
-    process.exit(1);
-  }
-
-  const preferences = (conf.get("preferences") || {}) as QuestionArgs;
-  await askQuestions(program as unknown as QuestionArgs, preferences);
-
-  await createApp({
-    template: program.template,
-    framework: program.framework,
-    engine: program.engine,
-    ui: program.ui,
-    appPath: resolvedProjectPath,
-    packageManager,
-    eslint: program.eslint,
-    frontend: program.frontend,
-    openAiKey: program.openAiKey,
-    llamaCloudKey: program.llamaCloudKey,
-    model: program.model,
-    embeddingModel: program.embeddingModel,
-    communityProjectPath: program.communityProjectPath,
-    llamapack: program.llamapack,
-    vectorDb: program.vectorDb,
-    externalPort: program.externalPort,
-    postInstallAction: program.postInstallAction,
-    dataSource: program.dataSource,
-    tools: program.tools,
-  });
-  conf.set("preferences", preferences);
-
-  if (program.postInstallAction === "VSCode") {
-    console.log(`Starting VSCode in ${root}...`);
-    try {
-      execSync(`code . --new-window --goto README.md`, {
-        stdio: "inherit",
-        cwd: root,
-      });
-    } catch (error) {
-      console.log(
-        red(
-          `Failed to start VSCode in ${root}. 
-Got error: ${(error as Error).message}.\n`,
-        ),
-      );
-      console.log(
-        `Make sure you have VSCode installed and added to your PATH (shell alias will not work). 
-Please check ${cyan(
-          terminalLink(
-            "This documentation",
-            `https://code.visualstudio.com/docs/setup/setup-overview`,
-          ),
-        )} for more information.`,
-      );
-    }
-  } else if (program.postInstallAction === "runApp") {
-    console.log(`Running app in ${root}...`);
-    await runApp(
-      root,
-      program.frontend,
-      program.framework,
-      program.port,
-      program.externalPort,
-    );
-  }
-}
-
-const update = checkForUpdate(packageJson).catch(() => null);
-
-async function notifyUpdate(): Promise<void> {
-  try {
-    const res = await update;
-    if (res?.latest) {
-      const updateMessage =
-        packageManager === "yarn"
-          ? "yarn global add create-llama@latest"
-          : packageManager === "pnpm"
-            ? "pnpm add -g create-llama@latest"
-            : "npm i -g create-llama@latest";
-
-      console.log(
-        yellow(bold("A new version of `create-llama` is available!")) +
-          "\n" +
-          "You can update by running: " +
-          cyan(updateMessage) +
-          "\n",
-      );
-    }
-  } catch {
-    // ignore error
-  }
-}
-
-run()
-  .then(notifyUpdate)
-  .catch(async (reason) => {
-    console.log();
-    console.log("Aborting installation.");
-    if (reason.command) {
-      console.log(`  ${cyan(reason.command)} has failed.`);
-    } else {
-      console.log(
-        red("Unexpected error. Please report it as a bug:") + "\n",
-        reason,
-      );
-    }
-    console.log();
-
-    await notifyUpdate();
-
-    process.exit(1);
-  });
diff --git a/packages/create-llama/package.json b/packages/create-llama/package.json
deleted file mode 100644
index 3ed42e94d34cfeedf41920e8f41d883883205b85..0000000000000000000000000000000000000000
--- a/packages/create-llama/package.json
+++ /dev/null
@@ -1,61 +0,0 @@
-{
-  "name": "create-llama",
-  "version": "0.0.28",
-  "keywords": [
-    "rag",
-    "llamaindex",
-    "next.js"
-  ],
-  "description": "Create LlamaIndex-powered apps with one command",
-  "repository": {
-    "type": "git",
-    "url": "https://github.com/run-llama/LlamaIndexTS",
-    "directory": "packages/create-llama"
-  },
-  "license": "MIT",
-  "bin": {
-    "create-llama": "./dist/index.js"
-  },
-  "files": [
-    "dist"
-  ],
-  "scripts": {
-    "clean": "rimraf --glob ./dist ./templates/**/__pycache__ ./templates/**/node_modules ./templates/**/poetry.lock",
-    "dev": "ncc build ./index.ts -w -o dist/",
-    "build": "npm run clean && ncc build ./index.ts -o ./dist/ --minify --no-cache --no-source-map-register",
-    "lint": "eslint . --ignore-pattern dist --ignore-pattern e2e/cache",
-    "e2e": "playwright test",
-    "prepublishOnly": "cd ../../ && pnpm run build:release"
-  },
-  "devDependencies": {
-    "@playwright/test": "^1.41.1",
-    "@types/async-retry": "1.4.2",
-    "@types/ci-info": "2.0.0",
-    "@types/cross-spawn": "6.0.0",
-    "@types/node": "^20.11.7",
-    "@types/prompts": "2.0.1",
-    "@types/tar": "6.1.5",
-    "@types/validate-npm-package-name": "3.0.0",
-    "@vercel/ncc": "0.38.1",
-    "async-retry": "1.3.1",
-    "async-sema": "3.0.1",
-    "ci-info": "github:watson/ci-info#f43f6a1cefff47fb361c88cf4b943fdbcaafe540",
-    "commander": "2.20.0",
-    "conf": "10.2.0",
-    "cross-spawn": "7.0.3",
-    "fast-glob": "3.3.1",
-    "got": "10.7.0",
-    "picocolors": "1.0.0",
-    "prompts": "2.1.0",
-    "rimraf": "^5.0.5",
-    "smol-toml": "^1.1.4",
-    "tar": "6.1.15",
-    "terminal-link": "^3.0.0",
-    "update-check": "1.5.4",
-    "validate-npm-package-name": "3.0.0",
-    "wait-port": "^1.1.0"
-  },
-  "engines": {
-    "node": ">=16.14.0"
-  }
-}
diff --git a/packages/create-llama/playwright.config.ts b/packages/create-llama/playwright.config.ts
deleted file mode 100644
index 0b4b420b7275e2035c1e9b2e60e17ef78dd16e9d..0000000000000000000000000000000000000000
--- a/packages/create-llama/playwright.config.ts
+++ /dev/null
@@ -1,21 +0,0 @@
-/* eslint-disable turbo/no-undeclared-env-vars */
-import { defineConfig, devices } from "@playwright/test";
-
-export default defineConfig({
-  testDir: "./e2e",
-  fullyParallel: true,
-  forbidOnly: !!process.env.CI,
-  retries: process.env.CI ? 2 : 0,
-  workers: process.env.CI ? 1 : undefined,
-  timeout: 1000 * 60 * 5,
-  reporter: "html",
-  use: {
-    trace: "on-first-retry",
-  },
-  projects: [
-    {
-      name: "chromium",
-      use: { ...devices["Desktop Chrome"] },
-    },
-  ],
-});
diff --git a/packages/create-llama/questions.ts b/packages/create-llama/questions.ts
deleted file mode 100644
index af5f17d69fd3b12cdaca5f83d7bbcd57b0fb6f35..0000000000000000000000000000000000000000
--- a/packages/create-llama/questions.ts
+++ /dev/null
@@ -1,747 +0,0 @@
-import { execSync } from "child_process";
-import ciInfo from "ci-info";
-import fs from "fs";
-import path from "path";
-import { blue, green, red } from "picocolors";
-import prompts from "prompts";
-import { InstallAppArgs } from "./create-app";
-import {
-  FileSourceConfig,
-  TemplateDataSourceType,
-  TemplateFramework,
-} from "./helpers";
-import { COMMUNITY_OWNER, COMMUNITY_REPO } from "./helpers/constant";
-import { templatesDir } from "./helpers/dir";
-import { getAvailableLlamapackOptions } from "./helpers/llama-pack";
-import { getRepoRootFolders } from "./helpers/repo";
-import { supportedTools, toolsRequireConfig } from "./helpers/tools";
-
-export type QuestionArgs = Omit<
-  InstallAppArgs,
-  "appPath" | "packageManager"
-> & { files?: string; llamaParse?: boolean };
-const supportedContextFileTypes = [
-  ".pdf",
-  ".doc",
-  ".docx",
-  ".xls",
-  ".xlsx",
-  ".csv",
-];
-const MACOS_FILE_SELECTION_SCRIPT = `
-osascript -l JavaScript -e '
-  a = Application.currentApplication();
-  a.includeStandardAdditions = true;
-  a.chooseFile({ withPrompt: "Please select a file to process:" }).toString()
-'`;
-const MACOS_FOLDER_SELECTION_SCRIPT = `
-osascript -l JavaScript -e '
-  a = Application.currentApplication();
-  a.includeStandardAdditions = true;
-  a.chooseFolder({ withPrompt: "Please select a folder to process:" }).toString()
-'`;
-const WINDOWS_FILE_SELECTION_SCRIPT = `
-Add-Type -AssemblyName System.Windows.Forms
-$openFileDialog = New-Object System.Windows.Forms.OpenFileDialog
-$openFileDialog.InitialDirectory = [Environment]::GetFolderPath('Desktop')
-$result = $openFileDialog.ShowDialog()
-if ($result -eq 'OK') {
-  $openFileDialog.FileName
-}
-`;
-const WINDOWS_FOLDER_SELECTION_SCRIPT = `
-Add-Type -AssemblyName System.Windows.Forms
-$folderBrowser = New-Object System.Windows.Forms.FolderBrowserDialog
-$dialogResult = $folderBrowser.ShowDialog()
-if ($dialogResult -eq [System.Windows.Forms.DialogResult]::OK)
-{
-    $folderBrowser.SelectedPath
-}
-`;
-
-const defaults: QuestionArgs = {
-  template: "streaming",
-  framework: "nextjs",
-  engine: "simple",
-  ui: "html",
-  eslint: true,
-  frontend: false,
-  openAiKey: "",
-  llamaCloudKey: "",
-  model: "gpt-3.5-turbo",
-  embeddingModel: "text-embedding-ada-002",
-  communityProjectPath: "",
-  llamapack: "",
-  postInstallAction: "dependencies",
-  dataSource: {
-    type: "none",
-    config: {},
-  },
-  tools: [],
-};
-
-const handlers = {
-  onCancel: () => {
-    console.error("Exiting.");
-    process.exit(1);
-  },
-};
-
-const getVectorDbChoices = (framework: TemplateFramework) => {
-  const choices = [
-    {
-      title: "No, just store the data in the file system",
-      value: "none",
-    },
-    { title: "MongoDB", value: "mongo" },
-    { title: "PostgreSQL", value: "pg" },
-    { title: "Pinecone", value: "pinecone" },
-  ];
-
-  const vectordbLang = framework === "fastapi" ? "python" : "typescript";
-  const compPath = path.join(templatesDir, "components");
-  const vectordbPath = path.join(compPath, "vectordbs", vectordbLang);
-
-  const availableChoices = fs
-    .readdirSync(vectordbPath)
-    .filter((file) => fs.statSync(path.join(vectordbPath, file)).isDirectory());
-
-  const displayedChoices = choices.filter((choice) =>
-    availableChoices.includes(choice.value),
-  );
-
-  return displayedChoices;
-};
-
-const getDataSourceChoices = (framework: TemplateFramework) => {
-  const choices = [
-    {
-      title: "No data, just a simple chat",
-      value: "simple",
-    },
-    { title: "Use an example PDF", value: "exampleFile" },
-  ];
-  if (process.platform === "win32" || process.platform === "darwin") {
-    choices.push({
-      title: `Use a local file (${supportedContextFileTypes.join(", ")})`,
-      value: "localFile",
-    });
-    choices.push({
-      title: `Use a local folder`,
-      value: "localFolder",
-    });
-  }
-  if (framework === "fastapi") {
-    choices.push({
-      title: "Use website content (requires Chrome)",
-      value: "web",
-    });
-  }
-  return choices;
-};
-
-const selectLocalContextData = async (type: TemplateDataSourceType) => {
-  try {
-    let selectedPath: string = "";
-    let execScript: string;
-    let execOpts: any = {};
-    switch (process.platform) {
-      case "win32": // Windows
-        execScript =
-          type === "file"
-            ? WINDOWS_FILE_SELECTION_SCRIPT
-            : WINDOWS_FOLDER_SELECTION_SCRIPT;
-        execOpts = { shell: "powershell.exe" };
-        break;
-      case "darwin": // MacOS
-        execScript =
-          type === "file"
-            ? MACOS_FILE_SELECTION_SCRIPT
-            : MACOS_FOLDER_SELECTION_SCRIPT;
-        break;
-      default: // Unsupported OS
-        console.log(red("Unsupported OS error!"));
-        process.exit(1);
-    }
-    selectedPath = execSync(execScript, execOpts).toString().trim();
-    if (type === "file") {
-      const fileType = path.extname(selectedPath);
-      if (!supportedContextFileTypes.includes(fileType)) {
-        console.log(
-          red(
-            `Please select a supported file type: ${supportedContextFileTypes.join(", ")}`,
-          ),
-        );
-        process.exit(1);
-      }
-    }
-    return selectedPath;
-  } catch (error) {
-    console.log(
-      red(
-        "Got an error when trying to select local context data! Please try again or select another data source option.",
-      ),
-    );
-    process.exit(1);
-  }
-};
-
-export const onPromptState = (state: any) => {
-  if (state.aborted) {
-    // If we don't re-enable the terminal cursor before exiting
-    // the program, the cursor will remain hidden
-    process.stdout.write("\x1B[?25h");
-    process.stdout.write("\n");
-    process.exit(1);
-  }
-};
-
-export const askQuestions = async (
-  program: QuestionArgs,
-  preferences: QuestionArgs,
-) => {
-  const getPrefOrDefault = <K extends keyof QuestionArgs>(
-    field: K,
-  ): QuestionArgs[K] => preferences[field] ?? defaults[field];
-
-  // Ask for next action after installation
-  async function askPostInstallAction() {
-    if (program.postInstallAction === undefined) {
-      if (ciInfo.isCI) {
-        program.postInstallAction = getPrefOrDefault("postInstallAction");
-      } else {
-        const actionChoices = [
-          {
-            title: "Just generate code (~1 sec)",
-            value: "none",
-          },
-          {
-            title: "Start in VSCode (~1 sec)",
-            value: "VSCode",
-          },
-          {
-            title: "Generate code and install dependencies (~2 min)",
-            value: "dependencies",
-          },
-        ];
-
-        const openAiKeyConfigured =
-          program.openAiKey || process.env["OPENAI_API_KEY"];
-        // If using LlamaParse, require LlamaCloud API key
-        const llamaCloudKeyConfigured = (
-          program.dataSource?.config as FileSourceConfig
-        )?.useLlamaParse
-          ? program.llamaCloudKey || process.env["LLAMA_CLOUD_API_KEY"]
-          : true;
-        const hasVectorDb = program.vectorDb && program.vectorDb !== "none";
-        // Can run the app if all tools do not require configuration
-        if (
-          !hasVectorDb &&
-          openAiKeyConfigured &&
-          llamaCloudKeyConfigured &&
-          !toolsRequireConfig(program.tools) &&
-          !program.llamapack
-        ) {
-          actionChoices.push({
-            title:
-              "Generate code, install dependencies, and run the app (~2 min)",
-            value: "runApp",
-          });
-        }
-
-        const { action } = await prompts(
-          {
-            type: "select",
-            name: "action",
-            message: "How would you like to proceed?",
-            choices: actionChoices,
-            initial: 1,
-          },
-          handlers,
-        );
-
-        program.postInstallAction = action;
-      }
-    }
-  }
-
-  if (!program.template) {
-    if (ciInfo.isCI) {
-      program.template = getPrefOrDefault("template");
-    } else {
-      const styledRepo = blue(
-        `https://github.com/${COMMUNITY_OWNER}/${COMMUNITY_REPO}`,
-      );
-      const { template } = await prompts(
-        {
-          type: "select",
-          name: "template",
-          message: "Which template would you like to use?",
-          choices: [
-            { title: "Chat without streaming", value: "simple" },
-            { title: "Chat with streaming", value: "streaming" },
-            {
-              title: `Community template from ${styledRepo}`,
-              value: "community",
-            },
-            {
-              title: "Example using a LlamaPack",
-              value: "llamapack",
-            },
-          ],
-          initial: 1,
-        },
-        handlers,
-      );
-      program.template = template;
-      preferences.template = template;
-    }
-  }
-
-  if (program.template === "community") {
-    const rootFolderNames = await getRepoRootFolders(
-      COMMUNITY_OWNER,
-      COMMUNITY_REPO,
-    );
-    const { communityProjectPath } = await prompts(
-      {
-        type: "select",
-        name: "communityProjectPath",
-        message: "Select community template",
-        choices: rootFolderNames.map((name) => ({
-          title: name,
-          value: name,
-        })),
-        initial: 0,
-      },
-      handlers,
-    );
-    program.communityProjectPath = communityProjectPath;
-    preferences.communityProjectPath = communityProjectPath;
-    return; // early return - no further questions needed for community projects
-  }
-
-  if (program.template === "llamapack") {
-    const availableLlamaPacks = await getAvailableLlamapackOptions();
-    const { llamapack } = await prompts(
-      {
-        type: "select",
-        name: "llamapack",
-        message: "Select LlamaPack",
-        choices: availableLlamaPacks.map((pack) => ({
-          title: pack.name,
-          value: pack.folderPath,
-        })),
-        initial: 0,
-      },
-      handlers,
-    );
-    program.llamapack = llamapack;
-    preferences.llamapack = llamapack;
-    await askPostInstallAction();
-    return; // early return - no further questions needed for llamapack projects
-  }
-
-  if (!program.framework) {
-    if (ciInfo.isCI) {
-      program.framework = getPrefOrDefault("framework");
-    } else {
-      const choices = [
-        { title: "Express", value: "express" },
-        { title: "FastAPI (Python)", value: "fastapi" },
-      ];
-      if (program.template === "streaming") {
-        // allow NextJS only for streaming template
-        choices.unshift({ title: "NextJS", value: "nextjs" });
-      }
-
-      const { framework } = await prompts(
-        {
-          type: "select",
-          name: "framework",
-          message: "Which framework would you like to use?",
-          choices,
-          initial: 0,
-        },
-        handlers,
-      );
-      program.framework = framework;
-      preferences.framework = framework;
-    }
-  }
-
-  if (
-    program.template === "streaming" &&
-    (program.framework === "express" || program.framework === "fastapi")
-  ) {
-    // if a backend-only framework is selected, ask whether we should create a frontend
-    // (only for streaming backends)
-    if (program.frontend === undefined) {
-      if (ciInfo.isCI) {
-        program.frontend = getPrefOrDefault("frontend");
-      } else {
-        const styledNextJS = blue("NextJS");
-        const styledBackend = green(
-          program.framework === "express"
-            ? "Express "
-            : program.framework === "fastapi"
-              ? "FastAPI (Python) "
-              : "",
-        );
-        const { frontend } = await prompts({
-          onState: onPromptState,
-          type: "toggle",
-          name: "frontend",
-          message: `Would you like to generate a ${styledNextJS} frontend for your ${styledBackend}backend?`,
-          initial: getPrefOrDefault("frontend"),
-          active: "Yes",
-          inactive: "No",
-        });
-        program.frontend = Boolean(frontend);
-        preferences.frontend = Boolean(frontend);
-      }
-    }
-  } else {
-    program.frontend = false;
-  }
-
-  if (program.framework === "nextjs" || program.frontend) {
-    if (!program.ui) {
-      if (ciInfo.isCI) {
-        program.ui = getPrefOrDefault("ui");
-      } else {
-        const { ui } = await prompts(
-          {
-            type: "select",
-            name: "ui",
-            message: "Which UI would you like to use?",
-            choices: [
-              { title: "Just HTML", value: "html" },
-              { title: "Shadcn", value: "shadcn" },
-            ],
-            initial: 0,
-          },
-          handlers,
-        );
-        program.ui = ui;
-        preferences.ui = ui;
-      }
-    }
-  }
-
-  if (!program.model) {
-    if (ciInfo.isCI) {
-      program.model = getPrefOrDefault("model");
-    } else {
-      const { model } = await prompts(
-        {
-          type: "select",
-          name: "model",
-          message: "Which model would you like to use?",
-          choices: [
-            { title: "gpt-3.5-turbo", value: "gpt-3.5-turbo-0125" },
-            { title: "gpt-4-turbo-preview", value: "gpt-4-turbo-preview" },
-            { title: "gpt-4", value: "gpt-4" },
-            {
-              title: "gpt-4-vision-preview",
-              value: "gpt-4-vision-preview",
-            },
-          ],
-          initial: 0,
-        },
-        handlers,
-      );
-      program.model = model;
-      preferences.model = model;
-    }
-  }
-
-  if (!program.embeddingModel && program.framework === "fastapi") {
-    if (ciInfo.isCI) {
-      program.embeddingModel = getPrefOrDefault("embeddingModel");
-    } else {
-      const { embeddingModel } = await prompts(
-        {
-          type: "select",
-          name: "embeddingModel",
-          message: "Which embedding model would you like to use?",
-          choices: [
-            {
-              title: "text-embedding-ada-002",
-              value: "text-embedding-ada-002",
-            },
-            {
-              title: "text-embedding-3-small",
-              value: "text-embedding-3-small",
-            },
-            {
-              title: "text-embedding-3-large",
-              value: "text-embedding-3-large",
-            },
-          ],
-          initial: 0,
-        },
-        handlers,
-      );
-      program.embeddingModel = embeddingModel;
-      preferences.embeddingModel = embeddingModel;
-    }
-  }
-
-  if (program.files) {
-    // If the user specified the files option, the program should use the context engine
-    program.engine = "context";
-    if (!fs.existsSync(program.files)) {
-      console.log("File or folder not found");
-      process.exit(1);
-    } else {
-      program.dataSource = {
-        type: fs.lstatSync(program.files).isDirectory() ? "folder" : "file",
-        config: {
-          path: program.files,
-        },
-      };
-    }
-  }
-
-  if (!program.engine) {
-    if (ciInfo.isCI) {
-      program.engine = getPrefOrDefault("engine");
-    } else {
-      const { dataSource } = await prompts(
-        {
-          type: "select",
-          name: "dataSource",
-          message: "Which data source would you like to use?",
-          choices: getDataSourceChoices(program.framework),
-          initial: 1,
-        },
-        handlers,
-      );
-      // Initialize with default config
-      program.dataSource = getPrefOrDefault("dataSource");
-      if (program.dataSource) {
-        switch (dataSource) {
-          case "simple":
-            program.engine = "simple";
-            program.dataSource = { type: "none", config: {} };
-            break;
-          case "exampleFile":
-            program.engine = "context";
-            // Treat example as a folder data source with no config
-            program.dataSource = { type: "folder", config: {} };
-            break;
-          case "localFile":
-            program.engine = "context";
-            program.dataSource = {
-              type: "file",
-              config: {
-                path: await selectLocalContextData("file"),
-              },
-            };
-            break;
-          case "localFolder":
-            program.engine = "context";
-            program.dataSource = {
-              type: "folder",
-              config: {
-                path: await selectLocalContextData("folder"),
-              },
-            };
-            break;
-          case "web":
-            program.engine = "context";
-            program.dataSource.type = "web";
-            break;
-        }
-      }
-    }
-  } else if (!program.dataSource) {
-    // Handle a case when engine is specified but dataSource is not
-    if (program.engine === "context") {
-      program.dataSource = {
-        type: "folder",
-        config: {},
-      };
-    } else if (program.engine === "simple") {
-      program.dataSource = {
-        type: "none",
-        config: {},
-      };
-    }
-  }
-
-  if (
-    (program.dataSource?.type === "file" ||
-      program.dataSource?.type === "folder") &&
-    program.framework === "fastapi"
-  ) {
-    if (ciInfo.isCI) {
-      program.llamaCloudKey = getPrefOrDefault("llamaCloudKey");
-    } else {
-      const dataSourceConfig = program.dataSource.config as FileSourceConfig;
-      dataSourceConfig.useLlamaParse = program.llamaParse;
-
-      // Ask about LlamaParse only if the user hasn't decided yet and the data
-      // source is a folder or a PDF file
-      const askingLlamaParse =
-        dataSourceConfig.useLlamaParse === undefined &&
-        (program.dataSource.type === "folder"
-          ? true
-          : dataSourceConfig.path &&
-            path.extname(dataSourceConfig.path) === ".pdf");
-
-      // Ask if user wants to use LlamaParse
-      if (askingLlamaParse) {
-        const { useLlamaParse } = await prompts(
-          {
-            type: "toggle",
-            name: "useLlamaParse",
-            message:
-              "Would you like to use LlamaParse (improved parser for RAG - requires API key)?",
-            initial: true,
-            active: "yes",
-            inactive: "no",
-          },
-          handlers,
-        );
-        dataSourceConfig.useLlamaParse = useLlamaParse;
-        program.dataSource.config = dataSourceConfig;
-      }
-
-      // Ask for LlamaCloud API key
-      if (
-        dataSourceConfig.useLlamaParse &&
-        program.llamaCloudKey === undefined
-      ) {
-        const { llamaCloudKey } = await prompts(
-          {
-            type: "text",
-            name: "llamaCloudKey",
-            message:
-              "Please provide your LlamaIndex Cloud API key (leave blank to skip):",
-          },
-          handlers,
-        );
-        program.llamaCloudKey = llamaCloudKey;
-      }
-    }
-  }
-
-  if (program.dataSource?.type === "web" && program.framework === "fastapi") {
-    let { baseUrl } = await prompts(
-      {
-        type: "text",
-        name: "baseUrl",
-        message: "Please provide base URL of the website:",
-        initial: "https://www.llamaindex.ai",
-      },
-      handlers,
-    );
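-    // Normalize the URL (default to https when no scheme is given) and check
-    // that it uses http(s)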
-    try {
-      if (!baseUrl.includes("://")) {
-        baseUrl = `https://${baseUrl}`;
-      }
-      const checkUrl = new URL(baseUrl);
-      if (checkUrl.protocol !== "https:" && checkUrl.protocol !== "http:") {
-        throw new Error("Invalid protocol");
-      }
-    } catch (error) {
-      console.log(
-        red(
-          "Invalid URL provided! Please provide a valid URL (e.g. https://www.llamaindex.ai)",
-        ),
-      );
-      process.exit(1);
-    }
-    program.dataSource.config = {
-      baseUrl: baseUrl,
-      depth: 1,
-    };
-  }
-
-  if (program.engine !== "simple" && !program.vectorDb) {
-    if (ciInfo.isCI) {
-      program.vectorDb = getPrefOrDefault("vectorDb");
-    } else {
-      const { vectorDb } = await prompts(
-        {
-          type: "select",
-          name: "vectorDb",
-          message: "Would you like to use a vector database?",
-          choices: getVectorDbChoices(program.framework),
-          initial: 0,
-        },
-        handlers,
-      );
-      program.vectorDb = vectorDb;
-      preferences.vectorDb = vectorDb;
-    }
-  }
-
-  if (
-    !program.tools &&
-    program.framework === "fastapi" &&
-    program.engine === "context"
-  ) {
-    if (ciInfo.isCI) {
-      program.tools = getPrefOrDefault("tools");
-    } else {
-      const toolChoices = supportedTools.map((tool) => ({
-        title: tool.display,
-        value: tool.name,
-      }));
-      const { toolsName } = await prompts({
-        type: "multiselect",
-        name: "toolsName",
-        message:
-          "Would you like to build an agent using tools? If so, select the tools here; otherwise just press Enter",
-        choices: toolChoices,
-      });
-      const tools = toolsName?.map((tool: string) =>
-        supportedTools.find((t) => t.name === tool),
-      );
-      program.tools = tools;
-      preferences.tools = tools;
-    }
-  }
-
-  if (!program.openAiKey) {
-    const { key } = await prompts(
-      {
-        type: "text",
-        name: "key",
-        message: "Please provide your OpenAI API key (leave blank to skip):",
-      },
-      handlers,
-    );
-    program.openAiKey = key;
-    preferences.openAiKey = key;
-  }
-
-  if (program.framework !== "fastapi" && program.eslint === undefined) {
-    if (ciInfo.isCI) {
-      program.eslint = getPrefOrDefault("eslint");
-    } else {
-      const styledEslint = blue("ESLint");
-      const { eslint } = await prompts({
-        onState: onPromptState,
-        type: "toggle",
-        name: "eslint",
-        message: `Would you like to use ${styledEslint}?`,
-        initial: getPrefOrDefault("eslint"),
-        active: "Yes",
-        inactive: "No",
-      });
-      program.eslint = Boolean(eslint);
-      preferences.eslint = Boolean(eslint);
-    }
-  }
-
-  await askPostInstallAction();
-
-  // TODO: consider using zod to validate the input (doesn't work like this as not every option is required)
-  // templateUISchema.parse(program.ui);
-  // templateEngineSchema.parse(program.engine);
-  // templateFrameworkSchema.parse(program.framework);
-  // templateTypeSchema.parse(program.template);
-};
diff --git a/packages/create-llama/templates/.gitignore b/packages/create-llama/templates/.gitignore
deleted file mode 100644
index ec6c67b630467343abb46cfeea0535ce4b339554..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-__pycache__
-poetry.lock
-storage
diff --git a/packages/create-llama/templates/README-fullstack.md b/packages/create-llama/templates/README-fullstack.md
deleted file mode 100644
index 5a41b8cfc370f4fe99269331065bdee8b6aa8e8c..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/README-fullstack.md
+++ /dev/null
@@ -1,18 +0,0 @@
-This is a [LlamaIndex](https://www.llamaindex.ai/) project bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).
-
-## Getting Started
-
-First, start up the backend as described in the [backend README](./backend/README.md).
-
-Second, run the development server of the frontend as described in the [frontend README](./frontend/README.md).
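-
-For a TypeScript backend, the commands are typically the following, run first in `./backend` and then in `./frontend` (the exact steps are in each README):
-
-```
-npm install
-npm run dev
-```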
-
-Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
-
-## Learn More
-
-To learn more about LlamaIndex, take a look at the following resources:
-
-- [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex (Python features).
-- [LlamaIndexTS Documentation](https://ts.llamaindex.ai) - learn about LlamaIndex (TypeScript features).
-
-You can check out [the LlamaIndexTS GitHub repository](https://github.com/run-llama/LlamaIndexTS) - your feedback and contributions are welcome!
diff --git a/packages/create-llama/templates/components/data/101.pdf b/packages/create-llama/templates/components/data/101.pdf
deleted file mode 100644
index ae5acffd5398b7c59e2df9e6dead2d99128b719c..0000000000000000000000000000000000000000
Binary files a/packages/create-llama/templates/components/data/101.pdf and /dev/null differ
diff --git a/packages/create-llama/templates/components/engines/python/agent/__init__.py b/packages/create-llama/templates/components/engines/python/agent/__init__.py
deleted file mode 100644
index 52e363941fcd64238f06fba4d8feaf386dce70fd..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/engines/python/agent/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import os
-from llama_index.core.settings import Settings
-from llama_index.core.agent import AgentRunner
-from llama_index.core.tools.query_engine import QueryEngineTool
-from app.engine.tools import ToolFactory
-from app.engine.index import get_index
-
-
-def get_chat_engine():
-    system_prompt = os.getenv("SYSTEM_PROMPT")
-    top_k = os.getenv("TOP_K", "3")
-    tools = []
-
-    # Add query tool
-    index = get_index()
-    query_engine = index.as_query_engine(similarity_top_k=int(top_k))
-    query_engine_tool = QueryEngineTool.from_defaults(query_engine=query_engine)
-    tools.append(query_engine_tool)
-
-    # Add additional tools
-    tools += ToolFactory.from_env()
-
-    return AgentRunner.from_llm(
-        llm=Settings.llm,
-        tools=tools,
-        system_prompt=system_prompt,
-        verbose=True,
-    )
diff --git a/packages/create-llama/templates/components/engines/python/agent/tools.py b/packages/create-llama/templates/components/engines/python/agent/tools.py
deleted file mode 100644
index fafc1fcc874477893e7ecc4ae9add4bcb5e062b1..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/engines/python/agent/tools.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import json
-import importlib
-
-from llama_index.core.tools.tool_spec.base import BaseToolSpec
-from llama_index.core.tools.function_tool import FunctionTool
-
-
-class ToolFactory:
-
-    @staticmethod
-    def create_tool(tool_name: str, **kwargs) -> list[FunctionTool]:
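-        # tool_name uses the form "<package>.<ToolSpecClass>", e.g.
-        # "wikipedia.WikipediaToolSpec" resolves to llama_index.tools.wikipedia.WikipediaToolSpec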
-        try:
-            tool_package, tool_cls_name = tool_name.split(".")
-            module_name = f"llama_index.tools.{tool_package}"
-            module = importlib.import_module(module_name)
-            tool_class = getattr(module, tool_cls_name)
-            tool_spec: BaseToolSpec = tool_class(**kwargs)
-            return tool_spec.to_tool_list()
-        except (ImportError, AttributeError) as e:
-            raise ValueError(f"Unsupported tool: {tool_name}") from e
-        except TypeError as e:
-            raise ValueError(
-                f"Could not create tool: {tool_name}. With config: {kwargs}"
-            ) from e
-
-    @staticmethod
-    def from_env() -> list[FunctionTool]:
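-        # tools_config.json maps tool names to their constructor kwargs,
-        # e.g. {"wikipedia.WikipediaToolSpec": {}}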
-        tools = []
-        with open("tools_config.json", "r") as f:
-            tool_configs = json.load(f)
-            for name, config in tool_configs.items():
-                tools += ToolFactory.create_tool(name, **config)
-        return tools
diff --git a/packages/create-llama/templates/components/engines/python/chat/__init__.py b/packages/create-llama/templates/components/engines/python/chat/__init__.py
deleted file mode 100644
index 65624c32c63c484958f2c566574ea029f731af39..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/engines/python/chat/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import os
-from app.engine.index import get_index
-
-
-def get_chat_engine():
-    system_prompt = os.getenv("SYSTEM_PROMPT")
-    top_k = os.getenv("TOP_K", "3")
-
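-    # condense_plus_context condenses the chat history into a standalone
-    # question, then retrieves context for it before answering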
-    return get_index().as_chat_engine(
-        similarity_top_k=int(top_k),
-        system_prompt=system_prompt,
-        chat_mode="condense_plus_context",
-    )
diff --git a/packages/create-llama/templates/components/loaders/python/file/loader.py b/packages/create-llama/templates/components/loaders/python/file/loader.py
deleted file mode 100644
index c1178444e255602c994b0ce45e84ca7baa61b404..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/loaders/python/file/loader.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from llama_index.core.readers import SimpleDirectoryReader
-
-DATA_DIR = "data"  # directory containing the documents
-
-
-def get_documents():
-    return SimpleDirectoryReader(DATA_DIR).load_data()
diff --git a/packages/create-llama/templates/components/loaders/python/llama_parse/loader.py b/packages/create-llama/templates/components/loaders/python/llama_parse/loader.py
deleted file mode 100644
index 4c9fbf109f32918d70c06d08e0a671c5377b4f0b..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/loaders/python/llama_parse/loader.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import os
-from llama_parse import LlamaParse
-from llama_index.core import SimpleDirectoryReader
-
-DATA_DIR = "data"  # directory containing the documents
-
-
-def get_documents():
-    if os.getenv("LLAMA_CLOUD_API_KEY") is None:
-        raise ValueError(
-            "LLAMA_CLOUD_API_KEY environment variable is not set. "
-            "Please set it in .env file or in your shell environment then run again!"
-        )
-    parser = LlamaParse(result_type="markdown", verbose=True, language="en")
-
-    reader = SimpleDirectoryReader(DATA_DIR, file_extractor={".pdf": parser})
-    return reader.load_data()
diff --git a/packages/create-llama/templates/components/loaders/python/web/loader.py b/packages/create-llama/templates/components/loaders/python/web/loader.py
deleted file mode 100644
index bc6d0496dda8fc7a9e0b1a79e1a4084fec6a6cb6..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/loaders/python/web/loader.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import os
-from llama_index.readers.web import WholeSiteReader
-
-
-def get_documents():
-    # Initialize the scraper with a prefix URL and maximum depth
-    scraper = WholeSiteReader(
-        prefix=os.environ.get("URL_PREFIX"), max_depth=int(os.environ.get("MAX_DEPTH"))
-    )
-    # Start scraping from a base URL
-    documents = scraper.load_data(base_url=os.environ.get("BASE_URL"))
-
-    return documents
diff --git a/packages/create-llama/templates/components/sample-projects/llamapack/README-template.md b/packages/create-llama/templates/components/sample-projects/llamapack/README-template.md
deleted file mode 100644
index f669c38c2be9506daea143fd08a6a42adb28f11d..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/sample-projects/llamapack/README-template.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-
-## Quickstart
-
-1. Check the instructions above for setting up your environment and export the required environment variables.
-   For example, if you are using bash, you can run the following command to set your OpenAI API key:
-
-```bash
-export OPENAI_API_KEY=your_api_key
-```
-
-2. Run the example
-
-```
-poetry run python example.py
-```
diff --git a/packages/create-llama/templates/components/sample-projects/llamapack/pyproject.toml b/packages/create-llama/templates/components/sample-projects/llamapack/pyproject.toml
deleted file mode 100644
index 4bd28bd809d189b8f6d505459c0ab14a2a2f0b7f..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/sample-projects/llamapack/pyproject.toml
+++ /dev/null
@@ -1,17 +0,0 @@
-[tool.poetry]
-name = "app"
-version = "0.1.0"
-description = "Llama Pack Example"
-authors = ["Marcus Schiesser <mail@marcusschiesser.de>"]
-readme = "README.md"
-
-[tool.poetry.dependencies]
-python = "^3.11,<3.12"
-llama-index = "^0.10.6"
-llama-index-readers-file = "^0.1.3"
-python-dotenv = "^1.0.0"
-
-
-[build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
diff --git a/packages/create-llama/templates/components/ui/html/chat/chat-avatar.tsx b/packages/create-llama/templates/components/ui/html/chat/chat-avatar.tsx
deleted file mode 100644
index cd241104e4ef210c728aec47a1ab8b0161ad6538..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/ui/html/chat/chat-avatar.tsx
+++ /dev/null
@@ -1,34 +0,0 @@
-"use client";
-
-import Image from "next/image";
-import { Message } from "./chat-messages";
-
-export default function ChatAvatar(message: Message) {
-  if (message.role === "user") {
-    return (
-      <div className="flex h-8 w-8 shrink-0 select-none items-center justify-center rounded-md border shadow bg-background">
-        <svg
-          xmlns="http://www.w3.org/2000/svg"
-          viewBox="0 0 256 256"
-          fill="currentColor"
-          className="h-4 w-4"
-        >
-          <path d="M230.92 212c-15.23-26.33-38.7-45.21-66.09-54.16a72 72 0 1 0-73.66 0c-27.39 8.94-50.86 27.82-66.09 54.16a8 8 0 1 0 13.85 8c18.84-32.56 52.14-52 89.07-52s70.23 19.44 89.07 52a8 8 0 1 0 13.85-8ZM72 96a56 56 0 1 1 56 56 56.06 56.06 0 0 1-56-56Z"></path>
-        </svg>
-      </div>
-    );
-  }
-
-  return (
-    <div className="flex h-8 w-8 shrink-0 select-none items-center justify-center rounded-md border  bg-black text-white">
-      <Image
-        className="rounded-md"
-        src="/llama.png"
-        alt="Llama Logo"
-        width={24}
-        height={24}
-        priority
-      />
-    </div>
-  );
-}
diff --git a/packages/create-llama/templates/components/ui/html/chat/chat-input.tsx b/packages/create-llama/templates/components/ui/html/chat/chat-input.tsx
deleted file mode 100644
index 7c3e87280b03ed571e8fc081a38c15a8d36df1ab..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/ui/html/chat/chat-input.tsx
+++ /dev/null
@@ -1,43 +0,0 @@
-"use client";
-
-export interface ChatInputProps {
-  /** The current value of the input */
-  input?: string;
-  /** An input/textarea-ready onChange handler to control the value of the input */
-  handleInputChange?: (
-    e:
-      | React.ChangeEvent<HTMLInputElement>
-      | React.ChangeEvent<HTMLTextAreaElement>,
-  ) => void;
-  /** Form submission handler to automatically reset input and append a user message  */
-  handleSubmit: (e: React.FormEvent<HTMLFormElement>) => void;
-  isLoading: boolean;
-  multiModal?: boolean;
-}
-
-export default function ChatInput(props: ChatInputProps) {
-  return (
-    <>
-      <form
-        onSubmit={props.handleSubmit}
-        className="flex items-start justify-between w-full max-w-5xl p-4 bg-white rounded-xl shadow-xl gap-4"
-      >
-        <input
-          autoFocus
-          name="message"
-          placeholder="Type a message"
-          className="w-full p-4 rounded-xl shadow-inner flex-1"
-          value={props.input}
-          onChange={props.handleInputChange}
-        />
-        <button
-          disabled={props.isLoading}
-          type="submit"
-          className="p-4 text-white rounded-xl shadow-xl bg-gradient-to-r from-cyan-500 to-sky-500 disabled:opacity-50 disabled:cursor-not-allowed"
-        >
-          Send message
-        </button>
-      </form>
-    </>
-  );
-}
diff --git a/packages/create-llama/templates/components/ui/html/chat/chat-item.tsx b/packages/create-llama/templates/components/ui/html/chat/chat-item.tsx
deleted file mode 100644
index 2244f729a8ba668121ab5ec0842963d22153ef92..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/ui/html/chat/chat-item.tsx
+++ /dev/null
@@ -1,13 +0,0 @@
-"use client";
-
-import ChatAvatar from "./chat-avatar";
-import { Message } from "./chat-messages";
-
-export default function ChatItem(message: Message) {
-  return (
-    <div className="flex items-start gap-4 pt-5">
-      <ChatAvatar {...message} />
-      <p className="break-words">{message.content}</p>
-    </div>
-  );
-}
diff --git a/packages/create-llama/templates/components/ui/html/chat/chat-messages.tsx b/packages/create-llama/templates/components/ui/html/chat/chat-messages.tsx
deleted file mode 100644
index 0e978394015bd985af40646e87fa6620e9001a2f..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/ui/html/chat/chat-messages.tsx
+++ /dev/null
@@ -1,48 +0,0 @@
-"use client";
-
-import { useEffect, useRef } from "react";
-import ChatItem from "./chat-item";
-
-export interface Message {
-  id: string;
-  content: string;
-  role: string;
-}
-
-export default function ChatMessages({
-  messages,
-  isLoading,
-  reload,
-  stop,
-}: {
-  messages: Message[];
-  isLoading?: boolean;
-  stop?: () => void;
-  reload?: () => void;
-}) {
-  const scrollableChatContainerRef = useRef<HTMLDivElement>(null);
-
-  const scrollToBottom = () => {
-    if (scrollableChatContainerRef.current) {
-      scrollableChatContainerRef.current.scrollTop =
-        scrollableChatContainerRef.current.scrollHeight;
-    }
-  };
-
-  useEffect(() => {
-    scrollToBottom();
-  }, [messages.length]);
-
-  return (
-    <div className="w-full max-w-5xl p-4 bg-white rounded-xl shadow-xl">
-      <div
-        className="flex flex-col gap-5 divide-y h-[50vh] overflow-auto"
-        ref={scrollableChatContainerRef}
-      >
-        {messages.map((m: Message) => (
-          <ChatItem key={m.id} {...m} />
-        ))}
-      </div>
-    </div>
-  );
-}
diff --git a/packages/create-llama/templates/components/ui/html/chat/index.ts b/packages/create-llama/templates/components/ui/html/chat/index.ts
deleted file mode 100644
index 5de7dce47fc1e2330759171db291eedbca19e722..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/ui/html/chat/index.ts
+++ /dev/null
@@ -1,6 +0,0 @@
-import ChatInput from "./chat-input";
-import ChatMessages from "./chat-messages";
-
-export type { ChatInputProps } from "./chat-input";
-export type { Message } from "./chat-messages";
-export { ChatInput, ChatMessages };
diff --git a/packages/create-llama/templates/components/vectordbs/python/mongo/__init__.py b/packages/create-llama/templates/components/vectordbs/python/mongo/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/packages/create-llama/templates/components/vectordbs/python/mongo/generate.py b/packages/create-llama/templates/components/vectordbs/python/mongo/generate.py
deleted file mode 100644
index 69d520711537c0c2b35f7be5bb2697cb9690cf5c..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/mongo/generate.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from dotenv import load_dotenv
-
-load_dotenv()
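-# env vars must be loaded before the app imports below read them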
-
-import os
-import logging
-from llama_index.core.storage import StorageContext
-from llama_index.core.indices import VectorStoreIndex
-from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
-from app.settings import init_settings
-from app.engine.loader import get_documents
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger()
-
-
-def generate_datasource():
-    logger.info("Creating new index")
-    # load the documents and create the index
-    documents = get_documents()
-    store = MongoDBAtlasVectorSearch(
-        db_name=os.environ["MONGODB_DATABASE"],
-        collection_name=os.environ["MONGODB_VECTORS"],
-        index_name=os.environ["MONGODB_VECTOR_INDEX"],
-    )
-    storage_context = StorageContext.from_defaults(vector_store=store)
-    VectorStoreIndex.from_documents(
-        documents,
-        storage_context=storage_context,
-        show_progress=True,  # this will show you a progress bar as the embeddings are created
-    )
-    logger.info(
-        f"Successfully created embeddings in the MongoDB collection {os.environ['MONGODB_VECTORS']}"
-    )
-    logger.info(
-        """IMPORTANT: You can't query your index yet because you need to create a vector search index in MongoDB's UI now.
-See https://github.com/run-llama/mongodb-demo/tree/main?tab=readme-ov-file#create-a-vector-search-index"""
-    )
-
-
-if __name__ == "__main__":
-    init_settings()
-    generate_datasource()
diff --git a/packages/create-llama/templates/components/vectordbs/python/mongo/index.py b/packages/create-llama/templates/components/vectordbs/python/mongo/index.py
deleted file mode 100644
index 6dba7c1d05ddd8e77853f081632b3232a89bc7e2..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/mongo/index.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import logging
-import os
-
-from llama_index.core.indices import VectorStoreIndex
-from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
-
-
-logger = logging.getLogger("uvicorn")
-
-
-def get_index():
-    logger.info("Connecting to index from MongoDB...")
-    store = MongoDBAtlasVectorSearch(
-        db_name=os.environ["MONGODB_DATABASE"],
-        collection_name=os.environ["MONGODB_VECTORS"],
-        index_name=os.environ["MONGODB_VECTOR_INDEX"],
-    )
-    index = VectorStoreIndex.from_vector_store(store)
-    logger.info("Finished connecting to index from MongoDB.")
-    return index
diff --git a/packages/create-llama/templates/components/vectordbs/python/none/__init__.py b/packages/create-llama/templates/components/vectordbs/python/none/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/packages/create-llama/templates/components/vectordbs/python/none/constants.py b/packages/create-llama/templates/components/vectordbs/python/none/constants.py
deleted file mode 100644
index 254998ebbda96cd491b7914ed795eb6b5cfe0d39..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/none/constants.py
+++ /dev/null
@@ -1 +0,0 @@
-STORAGE_DIR = "storage"  # directory to cache the generated index
diff --git a/packages/create-llama/templates/components/vectordbs/python/none/generate.py b/packages/create-llama/templates/components/vectordbs/python/none/generate.py
deleted file mode 100644
index 3c8055f3794669d37d599a502a6f7eb802202606..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/none/generate.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from dotenv import load_dotenv
-
-load_dotenv()
-
-import logging
-from llama_index.core.indices import (
-    VectorStoreIndex,
-)
-from app.engine.constants import STORAGE_DIR
-from app.engine.loader import get_documents
-from app.settings import init_settings
-
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger()
-
-
-def generate_datasource():
-    logger.info("Creating new index")
-    # load the documents and create the index
-    documents = get_documents()
-    index = VectorStoreIndex.from_documents(
-        documents,
-    )
-    # store it for later
-    index.storage_context.persist(STORAGE_DIR)
-    logger.info(f"Finished creating new index. Stored in {STORAGE_DIR}")
-
-
-if __name__ == "__main__":
-    init_settings()
-    generate_datasource()
diff --git a/packages/create-llama/templates/components/vectordbs/python/none/index.py b/packages/create-llama/templates/components/vectordbs/python/none/index.py
deleted file mode 100644
index 4dcc858a03ff1f0f8226236f436add8e705bb80f..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/none/index.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import logging
-import os
-
-from app.engine.constants import STORAGE_DIR
-from llama_index.core.storage import StorageContext
-from llama_index.core.indices import load_index_from_storage
-
-logger = logging.getLogger("uvicorn")
-
-
-def get_index():
-    # check if storage already exists
-    if not os.path.exists(STORAGE_DIR):
-        raise Exception(
-            "StorageContext is empty - call 'python app/engine/generate.py' to generate the storage first"
-        )
-
-    # load the existing index
-    logger.info(f"Loading index from {STORAGE_DIR}...")
-    storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
-    index = load_index_from_storage(storage_context)
-    logger.info(f"Finished loading index from {STORAGE_DIR}")
-    return index
diff --git a/packages/create-llama/templates/components/vectordbs/python/pg/__init__.py b/packages/create-llama/templates/components/vectordbs/python/pg/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/packages/create-llama/templates/components/vectordbs/python/pg/constants.py b/packages/create-llama/templates/components/vectordbs/python/pg/constants.py
deleted file mode 100644
index a4ebd91831da4f3e6ff585106eee69fcf6993b0e..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/pg/constants.py
+++ /dev/null
@@ -1,2 +0,0 @@
-PGVECTOR_SCHEMA = "public"
-PGVECTOR_TABLE = "llamaindex_embedding"
\ No newline at end of file
diff --git a/packages/create-llama/templates/components/vectordbs/python/pg/generate.py b/packages/create-llama/templates/components/vectordbs/python/pg/generate.py
deleted file mode 100644
index 608beb2e81bc84a67a18fdd21a820eb207c4ad70..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/pg/generate.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from dotenv import load_dotenv
-
-load_dotenv()
-
-import logging
-from llama_index.core.indices import VectorStoreIndex
-from llama_index.core.storage import StorageContext
-
-from app.engine.loader import get_documents
-from app.settings import init_settings
-from app.engine.utils import init_pg_vector_store_from_env
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger()
-
-
-def generate_datasource():
-    logger.info("Creating new index")
-    # load the documents and create the index
-    documents = get_documents()
-    store = init_pg_vector_store_from_env()
-    storage_context = StorageContext.from_defaults(vector_store=store)
-    VectorStoreIndex.from_documents(
-        documents,
-        storage_context=storage_context,
-        show_progress=True,  # this will show you a progress bar as the embeddings are created
-    )
-    logger.info(
-        f"Successfully created embeddings in the PG vector store, schema={store.schema_name} table={store.table_name}"
-    )
-
-
-if __name__ == "__main__":
-    init_settings()
-    generate_datasource()
diff --git a/packages/create-llama/templates/components/vectordbs/python/pg/index.py b/packages/create-llama/templates/components/vectordbs/python/pg/index.py
deleted file mode 100644
index 3c4f31800b4f06fd286e8c23ab3fbdca393c4fca..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/pg/index.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import logging
-from llama_index.core.indices.vector_store import VectorStoreIndex
-from app.engine.utils import init_pg_vector_store_from_env
-
-logger = logging.getLogger("uvicorn")
-
-
-def get_index():
-    logger.info("Connecting to index from PGVector...")
-    store = init_pg_vector_store_from_env()
-    index = VectorStoreIndex.from_vector_store(store)
-    logger.info("Finished connecting to index from PGVector.")
-    return index
diff --git a/packages/create-llama/templates/components/vectordbs/python/pg/utils.py b/packages/create-llama/templates/components/vectordbs/python/pg/utils.py
deleted file mode 100644
index 39127846dfddb706a22fc1b20e3cef1bf98751a7..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/pg/utils.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import os
-from llama_index.vector_stores.postgres import PGVectorStore
-from urllib.parse import urlparse
-from app.engine.constants import PGVECTOR_SCHEMA, PGVECTOR_TABLE
-
-
-def init_pg_vector_store_from_env():
-    original_conn_string = os.environ.get("PG_CONNECTION_STRING")
-    if original_conn_string is None or original_conn_string == "":
-        raise ValueError("PG_CONNECTION_STRING environment variable is not set.")
-
-    # PGVectorStore requires two connection strings: one for psycopg2 and one for asyncpg
-    # Replace the configured scheme with the psycopg2 and asyncpg schemes
-    original_scheme = urlparse(original_conn_string).scheme + "://"
-    conn_string = original_conn_string.replace(
-        original_scheme, "postgresql+psycopg2://"
-    )
-    async_conn_string = original_conn_string.replace(
-        original_scheme, "postgresql+asyncpg://"
-    )
-
-    return PGVectorStore(
-        connection_string=conn_string,
-        async_connection_string=async_conn_string,
-        schema_name=PGVECTOR_SCHEMA,
-        table_name=PGVECTOR_TABLE,
-    )
diff --git a/packages/create-llama/templates/components/vectordbs/python/pinecone/__init__.py b/packages/create-llama/templates/components/vectordbs/python/pinecone/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/packages/create-llama/templates/components/vectordbs/python/pinecone/generate.py b/packages/create-llama/templates/components/vectordbs/python/pinecone/generate.py
deleted file mode 100644
index 4e14648b0b008d244e37532dfdd4f1304c95853c..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/pinecone/generate.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from dotenv import load_dotenv
-
-load_dotenv()
-
-import os
-import logging
-from llama_index.core.storage import StorageContext
-from llama_index.core.indices import VectorStoreIndex
-from llama_index.vector_stores.pinecone import PineconeVectorStore
-from app.settings import init_settings
-from app.engine.loader import get_documents
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger()
-
-
-def generate_datasource():
-    logger.info("Creating new index")
-    # load the documents and create the index
-    documents = get_documents()
-    store = PineconeVectorStore(
-        api_key=os.environ["PINECONE_API_KEY"],
-        index_name=os.environ["PINECONE_INDEX_NAME"],
-        environment=os.environ["PINECONE_ENVIRONMENT"],
-    )
-    storage_context = StorageContext.from_defaults(vector_store=store)
-    VectorStoreIndex.from_documents(
-        documents,
-        storage_context=storage_context,
-        show_progress=True,  # this will show you a progress bar as the embeddings are created
-    )
-    logger.info(
-        f"Successfully created embeddings and save to your Pinecone index {os.environ['PINECONE_INDEX_NAME']}"
-    )
-
-
-if __name__ == "__main__":
-    init_settings()
-    generate_datasource()
diff --git a/packages/create-llama/templates/components/vectordbs/python/pinecone/index.py b/packages/create-llama/templates/components/vectordbs/python/pinecone/index.py
deleted file mode 100644
index 98824ffdc5f197ad9d3d0a3b546ffbee64f4f7ed..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/python/pinecone/index.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import logging
-import os
-
-from llama_index.core.indices import VectorStoreIndex
-from llama_index.vector_stores.pinecone import PineconeVectorStore
-
-
-logger = logging.getLogger("uvicorn")
-
-
-def get_index():
-    logger.info("Connecting to index from Pinecone...")
-    store = PineconeVectorStore(
-        api_key=os.environ["PINECONE_API_KEY"],
-        index_name=os.environ["PINECONE_INDEX_NAME"],
-        environment=os.environ["PINECONE_ENVIRONMENT"],
-    )
-    index = VectorStoreIndex.from_vector_store(store)
-    logger.info("Finished connecting to index from Pinecone.")
-    return index
diff --git a/packages/create-llama/templates/components/vectordbs/typescript/mongo/generate.mjs b/packages/create-llama/templates/components/vectordbs/typescript/mongo/generate.mjs
deleted file mode 100644
index 7337d1222b997a044734396109d1da4ad3019699..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/typescript/mongo/generate.mjs
+++ /dev/null
@@ -1,49 +0,0 @@
-/* eslint-disable turbo/no-undeclared-env-vars */
-import * as dotenv from "dotenv";
-import {
-  MongoDBAtlasVectorSearch,
-  SimpleDirectoryReader,
-  VectorStoreIndex,
-  storageContextFromDefaults,
-} from "llamaindex";
-import { MongoClient } from "mongodb";
-import { STORAGE_DIR, checkRequiredEnvVars } from "./shared.mjs";
-
-dotenv.config();
-
-const mongoUri = process.env.MONGO_URI;
-const databaseName = process.env.MONGODB_DATABASE;
-const vectorCollectionName = process.env.MONGODB_VECTORS;
-const indexName = process.env.MONGODB_VECTOR_INDEX;
-
-async function loadAndIndex() {
-  // Create a new client and connect to the server
-  const client = new MongoClient(mongoUri);
-
-  // load objects from storage and convert them into LlamaIndex Document objects
-  const documents = await new SimpleDirectoryReader().loadData({
-    directoryPath: STORAGE_DIR,
-  });
-
-  // create Atlas as a vector store
-  const vectorStore = new MongoDBAtlasVectorSearch({
-    mongodbClient: client,
-    dbName: databaseName,
-    collectionName: vectorCollectionName, // this is where your embeddings will be stored
-    indexName: indexName, // this is the name of the index you will need to create
-  });
-
-  // now create an index from all the Documents and store them in Atlas
-  const storageContext = await storageContextFromDefaults({ vectorStore });
-  await VectorStoreIndex.fromDocuments(documents, { storageContext });
-  console.log(
-    `Successfully created embeddings in the MongoDB collection ${vectorCollectionName}.`,
-  );
-  await client.close();
-}
-
-(async () => {
-  checkRequiredEnvVars();
-  await loadAndIndex();
-  console.log("Finished generating storage.");
-})();
diff --git a/packages/create-llama/templates/components/vectordbs/typescript/mongo/index.ts b/packages/create-llama/templates/components/vectordbs/typescript/mongo/index.ts
deleted file mode 100644
index 844789c60d538c68c4e364127728544592ebbc47..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/typescript/mongo/index.ts
+++ /dev/null
@@ -1,37 +0,0 @@
-/* eslint-disable turbo/no-undeclared-env-vars */
-import {
-  ContextChatEngine,
-  LLM,
-  MongoDBAtlasVectorSearch,
-  serviceContextFromDefaults,
-  VectorStoreIndex,
-} from "llamaindex";
-import { MongoClient } from "mongodb";
-import { checkRequiredEnvVars, CHUNK_OVERLAP, CHUNK_SIZE } from "./shared.mjs";
-
-async function getDataSource(llm: LLM) {
-  checkRequiredEnvVars();
-  const client = new MongoClient(process.env.MONGO_URI!);
-  const serviceContext = serviceContextFromDefaults({
-    llm,
-    chunkSize: CHUNK_SIZE,
-    chunkOverlap: CHUNK_OVERLAP,
-  });
-  const store = new MongoDBAtlasVectorSearch({
-    mongodbClient: client,
-    dbName: process.env.MONGODB_DATABASE,
-    collectionName: process.env.MONGODB_VECTORS,
-    indexName: process.env.MONGODB_VECTOR_INDEX,
-  });
-
-  return await VectorStoreIndex.fromVectorStore(store, serviceContext);
-}
-
-export async function createChatEngine(llm: LLM) {
-  const index = await getDataSource(llm);
-  const retriever = index.asRetriever({ similarityTopK: 3 });
-  return new ContextChatEngine({
-    chatModel: llm,
-    retriever,
-  });
-}
diff --git a/packages/create-llama/templates/components/vectordbs/typescript/mongo/shared.mjs b/packages/create-llama/templates/components/vectordbs/typescript/mongo/shared.mjs
deleted file mode 100644
index 264a82f0626cbb2c603afa3a054375c092557e7c..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/typescript/mongo/shared.mjs
+++ /dev/null
@@ -1,27 +0,0 @@
-export const STORAGE_DIR = "./data";
-export const CHUNK_SIZE = 512;
-export const CHUNK_OVERLAP = 20;
-
-const REQUIRED_ENV_VARS = [
-  "MONGO_URI",
-  "MONGODB_DATABASE",
-  "MONGODB_VECTORS",
-  "MONGODB_VECTOR_INDEX",
-];
-
-export function checkRequiredEnvVars() {
-  const missingEnvVars = REQUIRED_ENV_VARS.filter((envVar) => {
-    return !process.env[envVar];
-  });
-
-  if (missingEnvVars.length > 0) {
-    console.log(
-      `The following environment variables are required but missing: ${missingEnvVars.join(
-        ", ",
-      )}`,
-    );
-    throw new Error(
-      `Missing environment variables: ${missingEnvVars.join(", ")}`,
-    );
-  }
-}
diff --git a/packages/create-llama/templates/components/vectordbs/typescript/none/constants.mjs b/packages/create-llama/templates/components/vectordbs/typescript/none/constants.mjs
deleted file mode 100644
index 8cfb403c3790b0c5088a802071896d2fdca98ded..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/typescript/none/constants.mjs
+++ /dev/null
@@ -1,4 +0,0 @@
-export const STORAGE_DIR = "./data";
-export const STORAGE_CACHE_DIR = "./cache";
-export const CHUNK_SIZE = 512;
-export const CHUNK_OVERLAP = 20;
diff --git a/packages/create-llama/templates/components/vectordbs/typescript/none/generate.mjs b/packages/create-llama/templates/components/vectordbs/typescript/none/generate.mjs
deleted file mode 100644
index 9334f98e47558b5d451e21143b9eacf5b0287955..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/typescript/none/generate.mjs
+++ /dev/null
@@ -1,53 +0,0 @@
-import {
-  serviceContextFromDefaults,
-  SimpleDirectoryReader,
-  storageContextFromDefaults,
-  VectorStoreIndex,
-} from "llamaindex";
-
-import * as dotenv from "dotenv";
-
-import {
-  CHUNK_OVERLAP,
-  CHUNK_SIZE,
-  STORAGE_CACHE_DIR,
-  STORAGE_DIR,
-} from "./constants.mjs";
-
-// Load environment variables from local .env file
-dotenv.config();
-
-async function getRuntime(func) {
-  const start = Date.now();
-  await func();
-  const end = Date.now();
-  return end - start;
-}
-
-async function generateDatasource(serviceContext) {
-  console.log(`Generating storage context...`);
-  // Split documents, create embeddings and store them in the storage context
-  const ms = await getRuntime(async () => {
-    const storageContext = await storageContextFromDefaults({
-      persistDir: STORAGE_CACHE_DIR,
-    });
-    const documents = await new SimpleDirectoryReader().loadData({
-      directoryPath: STORAGE_DIR,
-    });
-    await VectorStoreIndex.fromDocuments(documents, {
-      storageContext,
-      serviceContext,
-    });
-  });
-  console.log(`Storage context successfully generated in ${ms / 1000}s.`);
-}
-
-(async () => {
-  const serviceContext = serviceContextFromDefaults({
-    chunkSize: CHUNK_SIZE,
-    chunkOverlap: CHUNK_OVERLAP,
-  });
-
-  await generateDatasource(serviceContext);
-  console.log("Finished generating storage.");
-})();
diff --git a/packages/create-llama/templates/components/vectordbs/typescript/none/index.ts b/packages/create-llama/templates/components/vectordbs/typescript/none/index.ts
deleted file mode 100644
index e335446cfd72e9da910eab3228848e32e1e0475a..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/typescript/none/index.ts
+++ /dev/null
@@ -1,44 +0,0 @@
-import {
-  ContextChatEngine,
-  LLM,
-  serviceContextFromDefaults,
-  SimpleDocumentStore,
-  storageContextFromDefaults,
-  VectorStoreIndex,
-} from "llamaindex";
-import { CHUNK_OVERLAP, CHUNK_SIZE, STORAGE_CACHE_DIR } from "./constants.mjs";
-
-async function getDataSource(llm: LLM) {
-  const serviceContext = serviceContextFromDefaults({
-    llm,
-    chunkSize: CHUNK_SIZE,
-    chunkOverlap: CHUNK_OVERLAP,
-  });
-  const storageContext = await storageContextFromDefaults({
-    persistDir: `${STORAGE_CACHE_DIR}`,
-  });
-
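-  // count the stored documents to check that "npm run generate" has been run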
-  const numberOfDocs = Object.keys(
-    (storageContext.docStore as SimpleDocumentStore).toDict(),
-  ).length;
-  if (numberOfDocs === 0) {
-    throw new Error(
-      `StorageContext is empty - call 'npm run generate' to generate the storage first`,
-    );
-  }
-  return await VectorStoreIndex.init({
-    storageContext,
-    serviceContext,
-  });
-}
-
-export async function createChatEngine(llm: LLM) {
-  const index = await getDataSource(llm);
-  const retriever = index.asRetriever();
-  retriever.similarityTopK = 3;
-
-  return new ContextChatEngine({
-    chatModel: llm,
-    retriever,
-  });
-}
diff --git a/packages/create-llama/templates/components/vectordbs/typescript/pg/generate.mjs b/packages/create-llama/templates/components/vectordbs/typescript/pg/generate.mjs
deleted file mode 100644
index 3d959c698ac4e32d92310812d4ab15e0d67735b7..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/typescript/pg/generate.mjs
+++ /dev/null
@@ -1,45 +0,0 @@
-/* eslint-disable turbo/no-undeclared-env-vars */
-import * as dotenv from "dotenv";
-import {
-  PGVectorStore,
-  SimpleDirectoryReader,
-  VectorStoreIndex,
-  storageContextFromDefaults,
-} from "llamaindex";
-import {
-  PGVECTOR_SCHEMA,
-  PGVECTOR_TABLE,
-  STORAGE_DIR,
-  checkRequiredEnvVars,
-} from "./shared.mjs";
-
-dotenv.config();
-
-async function loadAndIndex() {
-  // load objects from storage and convert them into LlamaIndex Document objects
-  const documents = await new SimpleDirectoryReader().loadData({
-    directoryPath: STORAGE_DIR,
-  });
-
-  // create postgres vector store
-  const vectorStore = new PGVectorStore({
-    connectionString: process.env.PG_CONNECTION_STRING,
-    schemaName: PGVECTOR_SCHEMA,
-    tableName: PGVECTOR_TABLE,
-  });
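-  // scope this dataset to its own collection and remove any previous embeddings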
-  vectorStore.setCollection(STORAGE_DIR);
-  vectorStore.clearCollection();
-
-  // create index from all the Documents
-  console.log("Start creating embeddings...");
-  const storageContext = await storageContextFromDefaults({ vectorStore });
-  await VectorStoreIndex.fromDocuments(documents, { storageContext });
-  console.log(`Successfully created embeddings.`);
-}
-
-(async () => {
-  checkRequiredEnvVars();
-  await loadAndIndex();
-  console.log("Finished generating storage.");
-  process.exit(0);
-})();
diff --git a/packages/create-llama/templates/components/vectordbs/typescript/pg/index.ts b/packages/create-llama/templates/components/vectordbs/typescript/pg/index.ts
deleted file mode 100644
index 7de66a2e30a1aa5fb0f5b7b92bc0c6f3b24bfe7f..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/typescript/pg/index.ts
+++ /dev/null
@@ -1,39 +0,0 @@
-/* eslint-disable turbo/no-undeclared-env-vars */
-import {
-  ContextChatEngine,
-  LLM,
-  PGVectorStore,
-  VectorStoreIndex,
-  serviceContextFromDefaults,
-} from "llamaindex";
-import {
-  CHUNK_OVERLAP,
-  CHUNK_SIZE,
-  PGVECTOR_SCHEMA,
-  PGVECTOR_TABLE,
-  checkRequiredEnvVars,
-} from "./shared.mjs";
-
-async function getDataSource(llm: LLM) {
-  checkRequiredEnvVars();
-  const pgvs = new PGVectorStore({
-    connectionString: process.env.PG_CONNECTION_STRING,
-    schemaName: PGVECTOR_SCHEMA,
-    tableName: PGVECTOR_TABLE,
-  });
-  const serviceContext = serviceContextFromDefaults({
-    llm,
-    chunkSize: CHUNK_SIZE,
-    chunkOverlap: CHUNK_OVERLAP,
-  });
-  return await VectorStoreIndex.fromVectorStore(pgvs, serviceContext);
-}
-
-export async function createChatEngine(llm: LLM) {
-  const index = await getDataSource(llm);
-  const retriever = index.asRetriever({ similarityTopK: 3 });
-  return new ContextChatEngine({
-    chatModel: llm,
-    retriever,
-  });
-}
diff --git a/packages/create-llama/templates/components/vectordbs/typescript/pg/shared.mjs b/packages/create-llama/templates/components/vectordbs/typescript/pg/shared.mjs
deleted file mode 100644
index 8ad729c0af5ac9eb2992903aa4d9c09b8f3c7858..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/typescript/pg/shared.mjs
+++ /dev/null
@@ -1,24 +0,0 @@
-export const STORAGE_DIR = "./data";
-export const CHUNK_SIZE = 512;
-export const CHUNK_OVERLAP = 20;
-export const PGVECTOR_SCHEMA = "public";
-export const PGVECTOR_TABLE = "llamaindex_embedding";
-
-const REQUIRED_ENV_VARS = ["PG_CONNECTION_STRING", "OPENAI_API_KEY"];
-
-export function checkRequiredEnvVars() {
-  const missingEnvVars = REQUIRED_ENV_VARS.filter((envVar) => {
-    return !process.env[envVar];
-  });
-
-  if (missingEnvVars.length > 0) {
-    console.log(
-      `The following environment variables are required but missing: ${missingEnvVars.join(
-        ", ",
-      )}`,
-    );
-    throw new Error(
-      `Missing environment variables: ${missingEnvVars.join(", ")}`,
-    );
-  }
-}
diff --git a/packages/create-llama/templates/components/vectordbs/typescript/pinecone/generate.mjs b/packages/create-llama/templates/components/vectordbs/typescript/pinecone/generate.mjs
deleted file mode 100644
index b371a639a8e56c5186ee45b274539cff81c04973..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/typescript/pinecone/generate.mjs
+++ /dev/null
@@ -1,35 +0,0 @@
-/* eslint-disable turbo/no-undeclared-env-vars */
-import * as dotenv from "dotenv";
-import {
-  PineconeVectorStore,
-  SimpleDirectoryReader,
-  VectorStoreIndex,
-  storageContextFromDefaults,
-} from "llamaindex";
-import { STORAGE_DIR, checkRequiredEnvVars } from "./shared.mjs";
-
-dotenv.config();
-
-async function loadAndIndex() {
-  // load objects from storage and convert them into LlamaIndex Document objects
-  const documents = await new SimpleDirectoryReader().loadData({
-    directoryPath: STORAGE_DIR,
-  });
-
-  // create vector store
-  const vectorStore = new PineconeVectorStore();
-
-  // create index from all the Documents and store them in Pinecone
-  console.log("Start creating embeddings...");
-  const storageContext = await storageContextFromDefaults({ vectorStore });
-  await VectorStoreIndex.fromDocuments(documents, { storageContext });
-  console.log(
-    "Successfully created embeddings and save to your Pinecone index.",
-  );
-}
-
-(async () => {
-  checkRequiredEnvVars();
-  await loadAndIndex();
-  console.log("Finished generating storage.");
-})();
diff --git a/packages/create-llama/templates/components/vectordbs/typescript/pinecone/index.ts b/packages/create-llama/templates/components/vectordbs/typescript/pinecone/index.ts
deleted file mode 100644
index be18486c4e73c598474c502856d39808bbe98fa8..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/typescript/pinecone/index.ts
+++ /dev/null
@@ -1,29 +0,0 @@
-/* eslint-disable turbo/no-undeclared-env-vars */
-import {
-  ContextChatEngine,
-  LLM,
-  PineconeVectorStore,
-  VectorStoreIndex,
-  serviceContextFromDefaults,
-} from "llamaindex";
-import { CHUNK_OVERLAP, CHUNK_SIZE, checkRequiredEnvVars } from "./shared.mjs";
-
-async function getDataSource(llm: LLM) {
-  checkRequiredEnvVars();
-  const serviceContext = serviceContextFromDefaults({
-    llm,
-    chunkSize: CHUNK_SIZE,
-    chunkOverlap: CHUNK_OVERLAP,
-  });
-  const store = new PineconeVectorStore();
-  return await VectorStoreIndex.fromVectorStore(store, serviceContext);
-}
-
-export async function createChatEngine(llm: LLM) {
-  const index = await getDataSource(llm);
-  const retriever = index.asRetriever({ similarityTopK: 5 });
-  return new ContextChatEngine({
-    chatModel: llm,
-    retriever,
-  });
-}
diff --git a/packages/create-llama/templates/components/vectordbs/typescript/pinecone/shared.mjs b/packages/create-llama/templates/components/vectordbs/typescript/pinecone/shared.mjs
deleted file mode 100644
index f9140261c36f2c98e569fea5847c245a6d151ca9..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/components/vectordbs/typescript/pinecone/shared.mjs
+++ /dev/null
@@ -1,22 +0,0 @@
-export const STORAGE_DIR = "./data";
-export const CHUNK_SIZE = 512;
-export const CHUNK_OVERLAP = 20;
-
-const REQUIRED_ENV_VARS = ["PINECONE_ENVIRONMENT", "PINECONE_API_KEY"];
-
-export function checkRequiredEnvVars() {
-  const missingEnvVars = REQUIRED_ENV_VARS.filter((envVar) => {
-    return !process.env[envVar];
-  });
-
-  if (missingEnvVars.length > 0) {
-    console.log(
-      `The following environment variables are required but missing: ${missingEnvVars.join(
-        ", ",
-      )}`,
-    );
-    throw new Error(
-      `Missing environment variables: ${missingEnvVars.join(", ")}`,
-    );
-  }
-}
diff --git a/packages/create-llama/templates/devcontainer.json b/packages/create-llama/templates/devcontainer.json
deleted file mode 100644
index f87545ffbae3cb2d5779eb581b3c336bce487c59..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/devcontainer.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
-  "image": "mcr.microsoft.com/vscode/devcontainers/typescript-node:dev-20-bullseye",
-  "features": {
-    "ghcr.io/devcontainers-contrib/features/turborepo-npm:1": {},
-    "ghcr.io/devcontainers-contrib/features/typescript:2": {},
-    "ghcr.io/devcontainers/features/python:1": {
-      "version": "3.11",
-      "toolsToInstall": ["flake8", "black", "mypy", "poetry"]
-    }
-  },
-  "customizations": {
-    "codespaces": {
-      "openFiles": ["README.md"]
-    },
-    "vscode": {
-      "extensions": [
-        "ms-vscode.typescript-language-features",
-        "esbenp.prettier-vscode",
-        "ms-python.python",
-        "ms-python.black-formatter",
-        "ms-python.vscode-flake8",
-        "ms-python.vscode-pylance"
-      ],
-      "settings": {
-        "python.formatting.provider": "black",
-        "python.languageServer": "Pylance",
-        "python.analysis.typeCheckingMode": "basic"
-      }
-    }
-  },
-  "containerEnv": {
-    "POETRY_VIRTUALENVS_CREATE": "false"
-  },
-  "forwardPorts": [3000, 8000]
-}
diff --git a/packages/create-llama/templates/types/simple/express/README-template.md b/packages/create-llama/templates/types/simple/express/README-template.md
deleted file mode 100644
index 7ea94ab755535aefd4d5edc9f20a5908d7fedead..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/express/README-template.md
+++ /dev/null
@@ -1,50 +0,0 @@
-This is a [LlamaIndex](https://www.llamaindex.ai/) project using [Express](https://expressjs.com/) bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).
-
-## Getting Started
-
-First, install the dependencies:
-
-```
-npm install
-```
-
-Second, run the development server:
-
-```
-npm run dev
-```
-
-Then call the Express API endpoint `/api/chat` to see the result:
-
-```
-curl --location 'localhost:8000/api/chat' \
---header 'Content-Type: application/json' \
---data '{ "messages": [{ "role": "user", "content": "Hello" }] }'
-```
-
-You can start editing the API by modifying `src/controllers/chat.controller.ts`. The endpoint auto-updates as you save the file.
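-
-The endpoint also accepts an optional `data.imageUrl` field, which `src/controllers/chat.controller.ts` converts into a multi-modal message, for example (the image URL is illustrative):
-
-```
-curl --location 'localhost:8000/api/chat' \
---header 'Content-Type: application/json' \
---data '{ "messages": [{ "role": "user", "content": "What is in this image?" }], "data": { "imageUrl": "https://example.com/image.jpg" } }'
-```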
-
-## Production
-
-First, build the project:
-
-```
-npm run build
-```
-
-You can then run the production server:
-
-```
-NODE_ENV=production npm run start
-```
-
-> Note that the `NODE_ENV` environment variable is set to `production`. This disables the permissive development CORS policy; in production, CORS headers are only sent for the origin set in the `PROD_CORS_ORIGIN` environment variable (see `index.ts`).
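-
-For example, to allow requests from a single production origin (the domain is illustrative):
-
-```
-PROD_CORS_ORIGIN=https://www.example.com NODE_ENV=production npm run start
-```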
-
-## Learn More
-
-To learn more about LlamaIndex, take a look at the following resources:
-
-- [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex (Python features).
-- [LlamaIndexTS Documentation](https://ts.llamaindex.ai) - learn about LlamaIndex (TypeScript features).
-
-You can check out [the LlamaIndexTS GitHub repository](https://github.com/run-llama/LlamaIndexTS) - your feedback and contributions are welcome!
diff --git a/packages/create-llama/templates/types/simple/express/eslintrc.json b/packages/create-llama/templates/types/simple/express/eslintrc.json
deleted file mode 100644
index cf20cdc7a788ed949375c3e013b27e692d6df234..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/express/eslintrc.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-  "extends": "eslint:recommended"
-}
diff --git a/packages/create-llama/templates/types/simple/express/gitignore b/packages/create-llama/templates/types/simple/express/gitignore
deleted file mode 100644
index 7d5e30fc24e77107da0e35176a5c3caebe7f8862..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/express/gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-# local env files
-.env
-node_modules/
\ No newline at end of file
diff --git a/packages/create-llama/templates/types/simple/express/index.ts b/packages/create-llama/templates/types/simple/express/index.ts
deleted file mode 100644
index 721c4ec9dd1922a36756c1b78142cab739cb1e85..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/express/index.ts
+++ /dev/null
@@ -1,41 +0,0 @@
-/* eslint-disable turbo/no-undeclared-env-vars */
-import cors from "cors";
-import "dotenv/config";
-import express, { Express, Request, Response } from "express";
-import chatRouter from "./src/routes/chat.route";
-
-const app: Express = express();
-const port = parseInt(process.env.PORT || "8000");
-
-const env = process.env["NODE_ENV"];
-const isDevelopment = !env || env === "development";
-const prodCorsOrigin = process.env["PROD_CORS_ORIGIN"];
-
-app.use(express.json());
-
-if (isDevelopment) {
-  console.warn("Running in development mode - allowing CORS for all origins");
-  app.use(cors());
-} else if (prodCorsOrigin) {
-  console.log(
-    `Running in production mode - allowing CORS for domain: ${prodCorsOrigin}`,
-  );
-  const corsOptions = {
-    origin: prodCorsOrigin, // Restrict to production domain
-  };
-  app.use(cors(corsOptions));
-} else {
-  console.warn("Production CORS origin not set, defaulting to no CORS.");
-}
-
-app.use(express.text());
-
-app.get("/", (req: Request, res: Response) => {
-  res.send("LlamaIndex Express Server");
-});
-
-app.use("/api/chat", chatRouter);
-
-app.listen(port, () => {
-  console.log(`⚡️[server]: Server is running at http://localhost:${port}`);
-});
diff --git a/packages/create-llama/templates/types/simple/express/package.json b/packages/create-llama/templates/types/simple/express/package.json
deleted file mode 100644
index 849261e4a23743e193c97b3482be5b45f6e7f1f4..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/express/package.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{
-  "name": "llama-index-express",
-  "version": "1.0.0",
-  "main": "dist/index.js",
-  "type": "module",
-  "scripts": {
-    "build": "tsup index.ts --format esm --dts",
-    "start": "node dist/index.js",
-    "dev": "concurrently \"tsup index.ts --format esm --dts --watch\" \"nodemon -q dist/index.js\""
-  },
-  "dependencies": {
-    "cors": "^2.8.5",
-    "dotenv": "^16.3.1",
-    "express": "^4.18.2",
-    "llamaindex": "0.0.37"
-  },
-  "devDependencies": {
-    "@types/cors": "^2.8.17",
-    "@types/express": "^4.17.21",
-    "@types/node": "^20.9.5",
-    "concurrently": "^8.2.2",
-    "eslint": "^8.54.0",
-    "nodemon": "^3.0.1",
-    "tsup": "^7.3.0",
-    "typescript": "^5.3.2"
-  }
-}
diff --git a/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts b/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
deleted file mode 100644
index 9f9639b72724669e42ddd2ad25dcc2d31368c07c..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
+++ /dev/null
@@ -1,67 +0,0 @@
-import { Request, Response } from "express";
-import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
-import { createChatEngine } from "./engine";
-
-const convertMessageContent = (
-  textMessage: string,
-  imageUrl: string | undefined,
-): MessageContent => {
-  if (!imageUrl) return textMessage;
-  return [
-    {
-      type: "text",
-      text: textMessage,
-    },
-    {
-      type: "image_url",
-      image_url: {
-        url: imageUrl,
-      },
-    },
-  ];
-};
-
-export const chat = async (req: Request, res: Response) => {
-  try {
-    const { messages, data }: { messages: ChatMessage[]; data: any } = req.body;
-    const userMessage = messages?.pop(); // guard: messages may be missing from the request body
-    if (!messages || !userMessage || userMessage.role !== "user") {
-      return res.status(400).json({
-        error:
-          "messages are required in the request body and the last message must be from the user",
-      });
-    }
-
-    const llm = new OpenAI({
-      model: process.env.MODEL || "gpt-3.5-turbo",
-    });
-
-    // Convert message content from Vercel/AI format to LlamaIndex/OpenAI format
-    // Note: The non-streaming template does not need the Vercel/AI format; we're still using it for consistency with the streaming template
-    const userMessageContent = convertMessageContent(
-      userMessage.content,
-      data?.imageUrl,
-    );
-
-    const chatEngine = await createChatEngine(llm);
-
-    // Calling LlamaIndex's ChatEngine to get a response
-    const response = await chatEngine.chat({
-      message: userMessageContent,
-      chatHistory: messages,
-    });
-    const result: ChatMessage = {
-      role: "assistant",
-      content: response.response,
-    };
-
-    return res.status(200).json({
-      result,
-    });
-  } catch (error) {
-    console.error("[LlamaIndex]", error);
-    return res.status(500).json({
-      error: (error as Error).message,
-    });
-  }
-};
diff --git a/packages/create-llama/templates/types/simple/express/src/controllers/engine/index.ts b/packages/create-llama/templates/types/simple/express/src/controllers/engine/index.ts
deleted file mode 100644
index abb02e90cd2ce91096791bf10c4665afcbe11d38..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/express/src/controllers/engine/index.ts
+++ /dev/null
@@ -1,7 +0,0 @@
-import { LLM, SimpleChatEngine } from "llamaindex";
-
-export async function createChatEngine(llm: LLM) {
-  return new SimpleChatEngine({
-    llm,
-  });
-}
diff --git a/packages/create-llama/templates/types/simple/express/src/routes/chat.route.ts b/packages/create-llama/templates/types/simple/express/src/routes/chat.route.ts
deleted file mode 100644
index bdfeb08534b9a2c987c4d23a4bb5c6df50075908..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/express/src/routes/chat.route.ts
+++ /dev/null
@@ -1,8 +0,0 @@
-import express from "express";
-import { chat } from "../controllers/chat.controller";
-
-const llmRouter = express.Router();
-
-llmRouter.route("/").post(chat);
-
-export default llmRouter;
diff --git a/packages/create-llama/templates/types/simple/express/tsconfig.json b/packages/create-llama/templates/types/simple/express/tsconfig.json
deleted file mode 100644
index bc819cab43220b31fac6abb1d4a36ac5880ef011..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/express/tsconfig.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "compilerOptions": {
-    "target": "es2016",
-    "esModuleInterop": true,
-    "forceConsistentCasingInFileNames": true,
-    "strict": true,
-    "skipLibCheck": true,
-    "moduleResolution": "node"
-  }
-}
diff --git a/packages/create-llama/templates/types/simple/fastapi/README-template.md b/packages/create-llama/templates/types/simple/fastapi/README-template.md
deleted file mode 100644
index b1a35c42e80331465066fcf5d7200ad2fdb7f92d..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/fastapi/README-template.md
+++ /dev/null
@@ -1,58 +0,0 @@
-This is a [LlamaIndex](https://www.llamaindex.ai/) project using [FastAPI](https://fastapi.tiangolo.com/) bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).
-
-## Getting Started
-
-First, set up the environment with Poetry:
-
-> **_Note:_** This step is not needed if you are using the dev-container.
-
-```
-poetry install
-poetry shell
-```
-
-By default, we use the OpenAI LLM (though you can customize it; see `app/api/routers/chat.py`). As a result, you need to specify an `OPENAI_API_KEY` in a `.env` file in this directory.
-
-Example `.env` file:
-
-```
-OPENAI_API_KEY=<openai_api_key>
-```
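-
-`app/settings.py` also reads optional environment variables that configure the LLM and chunking, for example (the values are illustrative):
-
-```
-MODEL=gpt-4
-LLM_TEMPERATURE=0.2
-CHUNK_SIZE=1024
-```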
-
-Second, generate the embeddings of the documents in the `./data` directory (if this folder exists; otherwise, skip this step):
-
-```
-python app/engine/generate.py
-```
-
-Third, run the development server:
-
-```
-python main.py
-```
-
-Then call the API endpoint `/api/chat` to see the result:
-
-```
-curl --location 'localhost:8000/api/chat' \
---header 'Content-Type: application/json' \
---data '{ "messages": [{ "role": "user", "content": "Hello" }] }'
-```
-
-You can start editing the API by modifying `app/api/routers/chat.py`. The endpoint auto-updates as you save the file.
-
-Open [http://localhost:8000/docs](http://localhost:8000/docs) with your browser to see the Swagger UI of the API.
-
-The API allows CORS for all origins to simplify development. You can change this behavior by setting the `ENVIRONMENT` environment variable to `prod`:
-
-```
-ENVIRONMENT=prod python main.py
-```
-
-## Learn More
-
-To learn more about LlamaIndex, take a look at the following resources:
-
-- [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex.
-
-You can check out [the LlamaIndex GitHub repository](https://github.com/run-llama/llama_index) - your feedback and contributions are welcome!
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/__init__.py b/packages/create-llama/templates/types/simple/fastapi/app/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/api/__init__.py b/packages/create-llama/templates/types/simple/fastapi/app/api/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/api/routers/__init__.py b/packages/create-llama/templates/types/simple/fastapi/app/api/routers/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/api/routers/chat.py b/packages/create-llama/templates/types/simple/fastapi/app/api/routers/chat.py
deleted file mode 100644
index 8405f2ac592bb5666ed60a127f7f2b8c22c3e154..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/fastapi/app/api/routers/chat.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from typing import List
-from pydantic import BaseModel
-from fastapi import APIRouter, Depends, HTTPException, status
-from llama_index.core.chat_engine.types import BaseChatEngine
-from llama_index.core.llms import ChatMessage, MessageRole
-from app.engine import get_chat_engine
-
-chat_router = r = APIRouter()
-
-
-class _Message(BaseModel):
-    role: MessageRole
-    content: str
-
-
-class _ChatData(BaseModel):
-    messages: List[_Message]
-
-
-class _Result(BaseModel):
-    result: _Message
-
-
-@r.post("")
-async def chat(
-    data: _ChatData,
-    chat_engine: BaseChatEngine = Depends(get_chat_engine),
-) -> _Result:
-    # check preconditions and get last message
-    if len(data.messages) == 0:
-        raise HTTPException(
-            status_code=status.HTTP_400_BAD_REQUEST,
-            detail="No messages provided",
-        )
-    last_message = data.messages.pop()
-    if last_message.role != MessageRole.USER:
-        raise HTTPException(
-            status_code=status.HTTP_400_BAD_REQUEST,
-            detail="Last message must be from user",
-        )
-    # convert messages coming from the request to type ChatMessage
-    messages = [
-        ChatMessage(
-            role=m.role,
-            content=m.content,
-        )
-        for m in data.messages
-    ]
-
-    # query chat engine
-    response = await chat_engine.achat(last_message.content, messages)
-    return _Result(
-        result=_Message(role=MessageRole.ASSISTANT, content=response.response)
-    )
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/engine/__init__.py b/packages/create-llama/templates/types/simple/fastapi/app/engine/__init__.py
deleted file mode 100644
index fd8bb96a812db057eb2b0bcd6e1b8a17d221f76e..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/fastapi/app/engine/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from llama_index.core.chat_engine import SimpleChatEngine
-
-
-def get_chat_engine():
-    return SimpleChatEngine.from_defaults()
diff --git a/packages/create-llama/templates/types/simple/fastapi/app/settings.py b/packages/create-llama/templates/types/simple/fastapi/app/settings.py
deleted file mode 100644
index 3f2c5e078e0fddb01d433ee0147de5a207f23e36..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/fastapi/app/settings.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import os
-from typing import Dict
-from llama_index.core.settings import Settings
-from llama_index.llms.openai import OpenAI
-from llama_index.embeddings.openai import OpenAIEmbedding
-
-
-def llm_config_from_env() -> Dict:
-    from llama_index.core.constants import DEFAULT_TEMPERATURE
-
-    model = os.getenv("MODEL")
-    temperature = os.getenv("LLM_TEMPERATURE", DEFAULT_TEMPERATURE)
-    max_tokens = os.getenv("LLM_MAX_TOKENS")
-
-    config = {
-        "model": model,
-        "temperature": float(temperature),
-        "max_tokens": int(max_tokens) if max_tokens is not None else None,
-    }
-    return config
-
-
-def embedding_config_from_env() -> Dict:
-    model = os.getenv("EMBEDDING_MODEL")
-    dimension = os.getenv("EMBEDDING_DIM")
-
-    config = {
-        "model": model,
-        "dimension": int(dimension) if dimension is not None else None,
-    }
-    return config
-
-
-def init_settings():
-    llm_configs = llm_config_from_env()
-    embedding_configs = embedding_config_from_env()
-
-    Settings.llm = OpenAI(**llm_configs)
-    Settings.embed_model = OpenAIEmbedding(**embedding_configs)
-    Settings.chunk_size = int(os.getenv("CHUNK_SIZE", "1024"))
-    Settings.chunk_overlap = int(os.getenv("CHUNK_OVERLAP", "20"))
diff --git a/packages/create-llama/templates/types/simple/fastapi/gitignore b/packages/create-llama/templates/types/simple/fastapi/gitignore
deleted file mode 100644
index a6ad564cd45eee8c8bd1fdc633d733d17b4a777a..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/fastapi/gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-__pycache__
-storage
-.env
diff --git a/packages/create-llama/templates/types/simple/fastapi/main.py b/packages/create-llama/templates/types/simple/fastapi/main.py
deleted file mode 100644
index 41721a8f2f95c5e8883b7dcbc9ef627746d27c27..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/fastapi/main.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from dotenv import load_dotenv
-
-load_dotenv()
-
-import logging
-import os
-import uvicorn
-from fastapi import FastAPI
-from fastapi.middleware.cors import CORSMiddleware
-from app.api.routers.chat import chat_router
-from app.settings import init_settings
-
-app = FastAPI()
-
-init_settings()
-
-environment = os.getenv("ENVIRONMENT", "dev")  # Default to 'dev' if not set
-
-
-if environment == "dev":
-    logger = logging.getLogger("uvicorn")
-    logger.warning("Running in development mode - allowing CORS for all origins")
-    app.add_middleware(
-        CORSMiddleware,
-        allow_origins=["*"],
-        allow_credentials=True,
-        allow_methods=["*"],
-        allow_headers=["*"],
-    )
-
-app.include_router(chat_router, prefix="/api/chat")
-
-
-if __name__ == "__main__":
-    app_host = os.getenv("APP_HOST", "0.0.0.0")
-    app_port = int(os.getenv("APP_PORT", "8000"))
-    reload = environment == "dev"
-
-    uvicorn.run(app="main:app", host=app_host, port=app_port, reload=reload)
diff --git a/packages/create-llama/templates/types/simple/fastapi/pyproject.toml b/packages/create-llama/templates/types/simple/fastapi/pyproject.toml
deleted file mode 100644
index 45c3ae567a6a1b7392255a6aa49fe896dbb12f3f..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/simple/fastapi/pyproject.toml
+++ /dev/null
@@ -1,17 +0,0 @@
-[tool.poetry]
-name = "app"
-version = "0.1.0"
-description = ""
-authors = ["Marcus Schiesser <mail@marcusschiesser.de>"]
-readme = "README.md"
-
-[tool.poetry.dependencies]
-python = "^3.11,<3.12"
-fastapi = "^0.109.1"
-uvicorn = { extras = ["standard"], version = "^0.23.2" }
-python-dotenv = "^1.0.0"
-llama-index = "^0.10.7"
-
-[build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
diff --git a/packages/create-llama/templates/types/simple/fastapi/tests/__init__.py b/packages/create-llama/templates/types/simple/fastapi/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/packages/create-llama/templates/types/streaming/express/README-template.md b/packages/create-llama/templates/types/streaming/express/README-template.md
deleted file mode 100644
index 0e9d796106725e9d9ebaa653fa5471e807ee1760..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/express/README-template.md
+++ /dev/null
@@ -1,50 +0,0 @@
-This is a [LlamaIndex](https://www.llamaindex.ai/) project using [Express](https://expressjs.com/) bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).
-
-## Getting Started
-
-First, install the dependencies:
-
-```
-npm install
-```
-
-Second, run the development server:
-
-```
-npm run dev
-```
-
-Then call the Express API endpoint `/api/chat` to see the result:
-
-```
-curl --location 'localhost:8000/api/chat' \
---header 'Content-Type: application/json' \
---data '{ "messages": [{ "role": "user", "content": "Hello" }] }'
-```
-
-You can start editing the API by modifying `src/controllers/chat.controller.ts`. The endpoint auto-updates as you save the file.
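-
-The endpoint also accepts an optional `data.imageUrl` field, which `src/controllers/chat.controller.ts` converts into a multi-modal message, for example (the image URL is illustrative):
-
-```
-curl --location 'localhost:8000/api/chat' \
---header 'Content-Type: application/json' \
---data '{ "messages": [{ "role": "user", "content": "What is in this image?" }], "data": { "imageUrl": "https://example.com/image.jpg" } }'
-```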
-
-## Production
-
-First, build the project:
-
-```
-npm run build
-```
-
-You can then run the production server:
-
-```
-NODE_ENV=production npm run start
-```
-
-> Note that the `NODE_ENV` environment variable is set to `production`. This disables the permissive development CORS policy; in production, CORS headers are only sent for the origin set in the `PROD_CORS_ORIGIN` environment variable (see `index.ts`).
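-
-For example, to allow requests from a single production origin (the domain is illustrative):
-
-```
-PROD_CORS_ORIGIN=https://www.example.com NODE_ENV=production npm run start
-```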
-
-## Learn More
-
-To learn more about LlamaIndex, take a look at the following resources:
-
-- [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex (Python features).
-- [LlamaIndexTS Documentation](https://ts.llamaindex.ai) - learn about LlamaIndex (TypeScript features).
-
-You can check out [the LlamaIndexTS GitHub repository](https://github.com/run-llama/LlamaIndexTS) - your feedback and contributions are welcome!
diff --git a/packages/create-llama/templates/types/streaming/express/eslintrc.json b/packages/create-llama/templates/types/streaming/express/eslintrc.json
deleted file mode 100644
index cf20cdc7a788ed949375c3e013b27e692d6df234..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/express/eslintrc.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-  "extends": "eslint:recommended"
-}
diff --git a/packages/create-llama/templates/types/streaming/express/gitignore b/packages/create-llama/templates/types/streaming/express/gitignore
deleted file mode 100644
index 7d5e30fc24e77107da0e35176a5c3caebe7f8862..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/express/gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-# local env files
-.env
-node_modules/
\ No newline at end of file
diff --git a/packages/create-llama/templates/types/streaming/express/index.ts b/packages/create-llama/templates/types/streaming/express/index.ts
deleted file mode 100644
index 721c4ec9dd1922a36756c1b78142cab739cb1e85..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/express/index.ts
+++ /dev/null
@@ -1,41 +0,0 @@
-/* eslint-disable turbo/no-undeclared-env-vars */
-import cors from "cors";
-import "dotenv/config";
-import express, { Express, Request, Response } from "express";
-import chatRouter from "./src/routes/chat.route";
-
-const app: Express = express();
-const port = parseInt(process.env.PORT || "8000");
-
-const env = process.env["NODE_ENV"];
-const isDevelopment = !env || env === "development";
-const prodCorsOrigin = process.env["PROD_CORS_ORIGIN"];
-
-app.use(express.json());
-
-if (isDevelopment) {
-  console.warn("Running in development mode - allowing CORS for all origins");
-  app.use(cors());
-} else if (prodCorsOrigin) {
-  console.log(
-    `Running in production mode - allowing CORS for domain: ${prodCorsOrigin}`,
-  );
-  const corsOptions = {
-    origin: prodCorsOrigin, // Restrict to production domain
-  };
-  app.use(cors(corsOptions));
-} else {
-  console.warn("Production CORS origin not set, defaulting to no CORS.");
-}
-
-app.use(express.text());
-
-app.get("/", (req: Request, res: Response) => {
-  res.send("LlamaIndex Express Server");
-});
-
-app.use("/api/chat", chatRouter);
-
-app.listen(port, () => {
-  console.log(`⚡️[server]: Server is running at http://localhost:${port}`);
-});
diff --git a/packages/create-llama/templates/types/streaming/express/package.json b/packages/create-llama/templates/types/streaming/express/package.json
deleted file mode 100644
index 3e46bb5fb2b427e7d7b1f4cc62b5334aa72437da..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/express/package.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
-  "name": "llama-index-express-streaming",
-  "version": "1.0.0",
-  "main": "dist/index.js",
-  "type": "module",
-  "scripts": {
-    "build": "tsup index.ts --format esm --dts",
-    "start": "node dist/index.js",
-    "dev": "concurrently \"tsup index.ts --format esm --dts --watch\" \"nodemon -q dist/index.js\""
-  },
-  "dependencies": {
-    "ai": "^2.2.25",
-    "cors": "^2.8.5",
-    "dotenv": "^16.3.1",
-    "express": "^4.18.2",
-    "llamaindex": "0.0.37"
-  },
-  "devDependencies": {
-    "@types/cors": "^2.8.16",
-    "@types/express": "^4.17.21",
-    "@types/node": "^20.9.5",
-    "concurrently": "^8.2.2",
-    "eslint": "^8.54.0",
-    "nodemon": "^3.0.1",
-    "tsup": "^8.0.1",
-    "typescript": "^5.3.2"
-  }
-}
diff --git a/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts b/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
deleted file mode 100644
index 9d1eb0c69b50ae4f258e5321eec1ffcc5d0bda1e..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
+++ /dev/null
@@ -1,81 +0,0 @@
-import { streamToResponse } from "ai";
-import { Request, Response } from "express";
-import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
-import { createChatEngine } from "./engine";
-import { LlamaIndexStream } from "./llamaindex-stream";
-
-const convertMessageContent = (
-  textMessage: string,
-  imageUrl: string | undefined,
-): MessageContent => {
-  if (!imageUrl) return textMessage;
-  return [
-    {
-      type: "text",
-      text: textMessage,
-    },
-    {
-      type: "image_url",
-      image_url: {
-        url: imageUrl,
-      },
-    },
-  ];
-};
-
-export const chat = async (req: Request, res: Response) => {
-  try {
-    const { messages, data }: { messages: ChatMessage[]; data: any } = req.body;
-    const userMessage = messages?.pop(); // guard: messages may be missing from the request body
-    if (!messages || !userMessage || userMessage.role !== "user") {
-      return res.status(400).json({
-        error:
-          "messages are required in the request body and the last message must be from the user",
-      });
-    }
-
-    const llm = new OpenAI({
-      model: (process.env.MODEL as any) || "gpt-3.5-turbo",
-    });
-
-    const chatEngine = await createChatEngine(llm);
-
-    // Convert message content from Vercel/AI format to LlamaIndex/OpenAI format
-    const userMessageContent = convertMessageContent(
-      userMessage.content,
-      data?.imageUrl,
-    );
-
-    // Calling LlamaIndex's ChatEngine to get a streamed response
-    const response = await chatEngine.chat({
-      message: userMessageContent,
-      chatHistory: messages,
-      stream: true,
-    });
-
-    // Return a stream, which can be consumed by the Vercel/AI client
-    const { stream, data: streamData } = LlamaIndexStream(response, {
-      parserOptions: {
-        image_url: data?.imageUrl,
-      },
-    });
-
-    // Pipe LlamaIndexStream to response
-    const processedStream = stream.pipeThrough(streamData.stream);
-    return streamToResponse(processedStream, res, {
-      headers: {
-        // response MUST have the `X-Experimental-Stream-Data: 'true'` header
-        // so that the client uses the correct parsing logic, see
-        // https://sdk.vercel.ai/docs/api-reference/stream-data#on-the-server
-        "X-Experimental-Stream-Data": "true",
-        "Content-Type": "text/plain; charset=utf-8",
-        "Access-Control-Expose-Headers": "X-Experimental-Stream-Data",
-      },
-    });
-  } catch (error) {
-    console.error("[LlamaIndex]", error);
-    return res.status(500).json({
-      error: (error as Error).message,
-    });
-  }
-};
diff --git a/packages/create-llama/templates/types/streaming/express/src/controllers/engine/index.ts b/packages/create-llama/templates/types/streaming/express/src/controllers/engine/index.ts
deleted file mode 100644
index abb02e90cd2ce91096791bf10c4665afcbe11d38..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/express/src/controllers/engine/index.ts
+++ /dev/null
@@ -1,7 +0,0 @@
-import { LLM, SimpleChatEngine } from "llamaindex";
-
-export async function createChatEngine(llm: LLM) {
-  return new SimpleChatEngine({
-    llm,
-  });
-}
diff --git a/packages/create-llama/templates/types/streaming/express/src/controllers/llamaindex-stream.ts b/packages/create-llama/templates/types/streaming/express/src/controllers/llamaindex-stream.ts
deleted file mode 100644
index 6ddd8eae68bc199188d07a0af8f27a12b2a6abb3..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/express/src/controllers/llamaindex-stream.ts
+++ /dev/null
@@ -1,68 +0,0 @@
-import {
-  JSONValue,
-  createCallbacksTransformer,
-  createStreamDataTransformer,
-  experimental_StreamData,
-  trimStartOfStreamHelper,
-  type AIStreamCallbacksAndOptions,
-} from "ai";
-import { Response } from "llamaindex";
-
-type ParserOptions = {
-  image_url?: string;
-};
-
-function createParser(
-  res: AsyncIterable<Response>,
-  data: experimental_StreamData,
-  opts?: ParserOptions,
-) {
-  const it = res[Symbol.asyncIterator]();
-  const trimStartOfStream = trimStartOfStreamHelper();
-  return new ReadableStream<string>({
-    start() {
-      // if image_url is provided, send it via the data stream
-      if (opts?.image_url) {
-        const message: JSONValue = {
-          type: "image_url",
-          image_url: {
-            url: opts.image_url,
-          },
-        };
-        data.append(message);
-      } else {
-        data.append({}); // send an empty image response for the user's message
-      }
-    },
-    async pull(controller): Promise<void> {
-      const { value, done } = await it.next();
-      if (done) {
-        controller.close();
-        data.append({}); // send an empty image response for the assistant's message
-        data.close();
-        return;
-      }
-
-      const text = trimStartOfStream(value.response ?? "");
-      if (text) {
-        controller.enqueue(text);
-      }
-    },
-  });
-}
-
-export function LlamaIndexStream(
-  res: AsyncIterable<Response>,
-  opts?: {
-    callbacks?: AIStreamCallbacksAndOptions;
-    parserOptions?: ParserOptions;
-  },
-): { stream: ReadableStream; data: experimental_StreamData } {
-  const data = new experimental_StreamData();
-  return {
-    stream: createParser(res, data, opts?.parserOptions)
-      .pipeThrough(createCallbacksTransformer(opts?.callbacks))
-      .pipeThrough(createStreamDataTransformer(true)),
-    data,
-  };
-}
diff --git a/packages/create-llama/templates/types/streaming/express/src/routes/chat.route.ts b/packages/create-llama/templates/types/streaming/express/src/routes/chat.route.ts
deleted file mode 100644
index bdfeb08534b9a2c987c4d23a4bb5c6df50075908..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/express/src/routes/chat.route.ts
+++ /dev/null
@@ -1,8 +0,0 @@
-import express from "express";
-import { chat } from "../controllers/chat.controller";
-
-const llmRouter = express.Router();
-
-llmRouter.route("/").post(chat);
-
-export default llmRouter;
diff --git a/packages/create-llama/templates/types/streaming/express/tsconfig.json b/packages/create-llama/templates/types/streaming/express/tsconfig.json
deleted file mode 100644
index bc819cab43220b31fac6abb1d4a36ac5880ef011..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/express/tsconfig.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "compilerOptions": {
-    "target": "es2016",
-    "esModuleInterop": true,
-    "forceConsistentCasingInFileNames": true,
-    "strict": true,
-    "skipLibCheck": true,
-    "moduleResolution": "node"
-  }
-}
diff --git a/packages/create-llama/templates/types/streaming/fastapi/README-template.md b/packages/create-llama/templates/types/streaming/fastapi/README-template.md
deleted file mode 100644
index 35ef1125adf9044ee229105362e93fe07766d73a..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/fastapi/README-template.md
+++ /dev/null
@@ -1,58 +0,0 @@
-This is a [LlamaIndex](https://www.llamaindex.ai/) project using [FastAPI](https://fastapi.tiangolo.com/) bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).
-
-## Getting Started
-
-First, set up the environment with Poetry:
-
-> **_Note:_** This step is not needed if you are using the dev-container.
-
-```
-poetry install
-poetry shell
-```
-
-By default, we use the OpenAI LLM (though you can customize it; see `app/settings.py`). As a result, you need to specify an `OPENAI_API_KEY` in a `.env` file in this directory.
-
-Example `.env` file:
-
-```
-OPENAI_API_KEY=<openai_api_key>
-```
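-
-`app/settings.py` also reads optional environment variables that configure the LLM and chunking, for example (the values are illustrative):
-
-```
-MODEL=gpt-4
-LLM_TEMPERATURE=0.2
-CHUNK_SIZE=1024
-```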
-
-Second, generate the embeddings of the documents in the `./data` directory (if this folder exists; otherwise, skip this step):
-
-```
-python app/engine/generate.py
-```
-
-Third, run the development server:
-
-```
-python main.py
-```
-
-Then call the API endpoint `/api/chat` to see the result:
-
-```
-curl --location 'localhost:8000/api/chat' \
---header 'Content-Type: application/json' \
---data '{ "messages": [{ "role": "user", "content": "Hello" }] }'
-```
-
-You can start editing the API by modifying `app/api/routers/chat.py`. The endpoint auto-updates as you save the file.
-
-Open [http://localhost:8000/docs](http://localhost:8000/docs) with your browser to see the Swagger UI of the API.
-
-The API allows CORS for all origins to simplify development. You can change this behavior by setting the `ENVIRONMENT` environment variable to `prod`:
-
-```
-ENVIRONMENT=prod python main.py
-```
-
-## Learn More
-
-To learn more about LlamaIndex, take a look at the following resources:
-
-- [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex.
-
-You can check out [the LlamaIndex GitHub repository](https://github.com/run-llama/llama_index) - your feedback and contributions are welcome!
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/__init__.py b/packages/create-llama/templates/types/streaming/fastapi/app/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/api/__init__.py b/packages/create-llama/templates/types/streaming/fastapi/app/api/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/api/routers/__init__.py b/packages/create-llama/templates/types/streaming/fastapi/app/api/routers/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/api/routers/chat.py b/packages/create-llama/templates/types/streaming/fastapi/app/api/routers/chat.py
deleted file mode 100644
index 278a9a753d626d79fb0ca9109893a70fb400bfa6..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/fastapi/app/api/routers/chat.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from typing import List
-from pydantic import BaseModel
-from fastapi.responses import StreamingResponse
-from fastapi import APIRouter, Depends, HTTPException, Request, status
-from llama_index.core.chat_engine.types import BaseChatEngine
-from llama_index.core.llms import ChatMessage, MessageRole
-from app.engine import get_chat_engine
-
-chat_router = r = APIRouter()
-
-
-class _Message(BaseModel):
-    role: MessageRole
-    content: str
-
-
-class _ChatData(BaseModel):
-    messages: List[_Message]
-
-
-@r.post("")
-async def chat(
-    request: Request,
-    data: _ChatData,
-    chat_engine: BaseChatEngine = Depends(get_chat_engine),
-):
-    # check preconditions and get last message
-    if len(data.messages) == 0:
-        raise HTTPException(
-            status_code=status.HTTP_400_BAD_REQUEST,
-            detail="No messages provided",
-        )
-    last_message = data.messages.pop()
-    if last_message.role != MessageRole.USER:
-        raise HTTPException(
-            status_code=status.HTTP_400_BAD_REQUEST,
-            detail="Last message must be from user",
-        )
-    # convert messages coming from the request to type ChatMessage
-    messages = [
-        ChatMessage(
-            role=m.role,
-            content=m.content,
-        )
-        for m in data.messages
-    ]
-
-    # query chat engine
-    response = await chat_engine.astream_chat(last_message.content, messages)
-
-    # stream response
-    async def event_generator():
-        async for token in response.async_response_gen():
-            # If client closes connection, stop sending events
-            if await request.is_disconnected():
-                break
-            yield token
-
-    return StreamingResponse(event_generator(), media_type="text/plain")
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/engine/__init__.py b/packages/create-llama/templates/types/streaming/fastapi/app/engine/__init__.py
deleted file mode 100644
index fd8bb96a812db057eb2b0bcd6e1b8a17d221f76e..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/fastapi/app/engine/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from llama_index.core.chat_engine import SimpleChatEngine
-
-
-def get_chat_engine():
-    return SimpleChatEngine.from_defaults()
diff --git a/packages/create-llama/templates/types/streaming/fastapi/app/settings.py b/packages/create-llama/templates/types/streaming/fastapi/app/settings.py
deleted file mode 100644
index 3f2c5e078e0fddb01d433ee0147de5a207f23e36..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/fastapi/app/settings.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import os
-from typing import Dict
-from llama_index.core.settings import Settings
-from llama_index.llms.openai import OpenAI
-from llama_index.embeddings.openai import OpenAIEmbedding
-
-
-def llm_config_from_env() -> Dict:
-    from llama_index.core.constants import DEFAULT_TEMPERATURE
-
-    model = os.getenv("MODEL")
-    temperature = os.getenv("LLM_TEMPERATURE", DEFAULT_TEMPERATURE)
-    max_tokens = os.getenv("LLM_MAX_TOKENS")
-
-    config = {
-        "model": model,
-        "temperature": float(temperature),
-        "max_tokens": int(max_tokens) if max_tokens is not None else None,
-    }
-    return config
-
-
-def embedding_config_from_env() -> Dict:
-    model = os.getenv("EMBEDDING_MODEL")
-    dimension = os.getenv("EMBEDDING_DIM")
-
-    config = {
-        "model": model,
-        "dimension": int(dimension) if dimension is not None else None,
-    }
-    return config
-
-
-def init_settings():
-    llm_configs = llm_config_from_env()
-    embedding_configs = embedding_config_from_env()
-
-    Settings.llm = OpenAI(**llm_configs)
-    Settings.embed_model = OpenAIEmbedding(**embedding_configs)
-    Settings.chunk_size = int(os.getenv("CHUNK_SIZE", "1024"))
-    Settings.chunk_overlap = int(os.getenv("CHUNK_OVERLAP", "20"))
diff --git a/packages/create-llama/templates/types/streaming/fastapi/gitignore b/packages/create-llama/templates/types/streaming/fastapi/gitignore
deleted file mode 100644
index a6ad564cd45eee8c8bd1fdc633d733d17b4a777a..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/fastapi/gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-__pycache__
-storage
-.env
diff --git a/packages/create-llama/templates/types/streaming/fastapi/main.py b/packages/create-llama/templates/types/streaming/fastapi/main.py
deleted file mode 100644
index edba3d3a758709012f9d92d1b543a9f67cb1240b..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/fastapi/main.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from dotenv import load_dotenv
-
-load_dotenv()
-
-import logging
-import os
-import uvicorn
-from fastapi import FastAPI
-from fastapi.middleware.cors import CORSMiddleware
-from app.api.routers.chat import chat_router
-from app.settings import init_settings
-
-
-app = FastAPI()
-
-init_settings()
-
-environment = os.getenv("ENVIRONMENT", "dev")  # Default to 'dev' if not set
-
-
-if environment == "dev":
-    logger = logging.getLogger("uvicorn")
-    logger.warning("Running in development mode - allowing CORS for all origins")
-    app.add_middleware(
-        CORSMiddleware,
-        allow_origins=["*"],
-        allow_credentials=True,
-        allow_methods=["*"],
-        allow_headers=["*"],
-    )
-
-app.include_router(chat_router, prefix="/api/chat")
-
-
-if __name__ == "__main__":
-    app_host = os.getenv("APP_HOST", "0.0.0.0")
-    app_port = int(os.getenv("APP_PORT", "8000"))
-    reload = environment == "dev"
-
-    uvicorn.run(app="main:app", host=app_host, port=app_port, reload=reload)
diff --git a/packages/create-llama/templates/types/streaming/fastapi/pyproject.toml b/packages/create-llama/templates/types/streaming/fastapi/pyproject.toml
deleted file mode 100644
index 45c3ae567a6a1b7392255a6aa49fe896dbb12f3f..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/fastapi/pyproject.toml
+++ /dev/null
@@ -1,17 +0,0 @@
-[tool.poetry]
-name = "app"
-version = "0.1.0"
-description = ""
-authors = ["Marcus Schiesser <mail@marcusschiesser.de>"]
-readme = "README.md"
-
-[tool.poetry.dependencies]
-python = "^3.11,<3.12"
-fastapi = "^0.109.1"
-uvicorn = { extras = ["standard"], version = "^0.23.2" }
-python-dotenv = "^1.0.0"
-llama-index = "^0.10.7"
-
-[build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
diff --git a/packages/create-llama/templates/types/streaming/fastapi/tests/__init__.py b/packages/create-llama/templates/types/streaming/fastapi/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/packages/create-llama/templates/types/streaming/nextjs/.env.example b/packages/create-llama/templates/types/streaming/nextjs/.env.example
deleted file mode 100644
index 7ac0a01551a65a68003c2615d510269b5d6a77f6..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/.env.example
+++ /dev/null
@@ -1,3 +0,0 @@
-# Rename this file to `.env.local` to use environment variables locally with `next dev`
-# https://nextjs.org/docs/pages/building-your-application/configuring/environment-variables
-MY_HOST="example.com"
diff --git a/packages/create-llama/templates/types/streaming/nextjs/README-template.md b/packages/create-llama/templates/types/streaming/nextjs/README-template.md
deleted file mode 100644
index 1509ded7c3be489d369b94d6d6a286d496f488d8..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/README-template.md
+++ /dev/null
@@ -1,30 +0,0 @@
-This is a [LlamaIndex](https://www.llamaindex.ai/) project using [Next.js](https://nextjs.org/) bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).
-
-## Getting Started
-
-First, install the dependencies:
-
-```
-npm install
-```
-
-Second, run the development server:
-
-```
-npm run dev
-```
-
-Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
-
-You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.
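-
-The chat UI sends its requests to the API route configured in the `NEXT_PUBLIC_CHAT_API` environment variable (see `app/components/chat-section.tsx`); if unset, the Vercel/AI client defaults to the project's own `/api/chat` route. For example, to use an external backend (the URL is illustrative):
-
-```
-NEXT_PUBLIC_CHAT_API=http://localhost:8000/api/chat npm run dev
-```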
-
-This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.
-
-## Learn More
-
-To learn more about LlamaIndex, take a look at the following resources:
-
-- [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex (Python features).
-- [LlamaIndexTS Documentation](https://ts.llamaindex.ai) - learn about LlamaIndex (TypeScript features).
-
-You can check out [the LlamaIndexTS GitHub repository](https://github.com/run-llama/LlamaIndexTS) - your feedback and contributions are welcome!
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/engine/index.ts b/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/engine/index.ts
deleted file mode 100644
index abb02e90cd2ce91096791bf10c4665afcbe11d38..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/engine/index.ts
+++ /dev/null
@@ -1,7 +0,0 @@
-import { LLM, SimpleChatEngine } from "llamaindex";
-
-export async function createChatEngine(llm: LLM) {
-  return new SimpleChatEngine({
-    llm,
-  });
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts b/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts
deleted file mode 100644
index 6ddd8eae68bc199188d07a0af8f27a12b2a6abb3..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts
+++ /dev/null
@@ -1,68 +0,0 @@
-import {
-  JSONValue,
-  createCallbacksTransformer,
-  createStreamDataTransformer,
-  experimental_StreamData,
-  trimStartOfStreamHelper,
-  type AIStreamCallbacksAndOptions,
-} from "ai";
-import { Response } from "llamaindex";
-
-type ParserOptions = {
-  image_url?: string;
-};
-
-function createParser(
-  res: AsyncIterable<Response>,
-  data: experimental_StreamData,
-  opts?: ParserOptions,
-) {
-  const it = res[Symbol.asyncIterator]();
-  const trimStartOfStream = trimStartOfStreamHelper();
-  return new ReadableStream<string>({
-    start() {
-      // if image_url is provided, send it via the data stream
-      if (opts?.image_url) {
-        const message: JSONValue = {
-          type: "image_url",
-          image_url: {
-            url: opts.image_url,
-          },
-        };
-        data.append(message);
-      } else {
-        data.append({}); // send an empty image response for the user's message
-      }
-    },
-    async pull(controller): Promise<void> {
-      const { value, done } = await it.next();
-      if (done) {
-        controller.close();
-        data.append({}); // send an empty image response for the assistant's message
-        data.close();
-        return;
-      }
-
-      const text = trimStartOfStream(value.response ?? "");
-      if (text) {
-        controller.enqueue(text);
-      }
-    },
-  });
-}
-
-export function LlamaIndexStream(
-  res: AsyncIterable<Response>,
-  opts?: {
-    callbacks?: AIStreamCallbacksAndOptions;
-    parserOptions?: ParserOptions;
-  },
-): { stream: ReadableStream; data: experimental_StreamData } {
-  const data = new experimental_StreamData();
-  return {
-    stream: createParser(res, data, opts?.parserOptions)
-      .pipeThrough(createCallbacksTransformer(opts?.callbacks))
-      .pipeThrough(createStreamDataTransformer(true)),
-    data,
-  };
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/route.ts b/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/route.ts
deleted file mode 100644
index ef35bf76e427e27ea4f05460624c91017b2112b1..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/route.ts
+++ /dev/null
@@ -1,84 +0,0 @@
-import { StreamingTextResponse } from "ai";
-import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
-import { NextRequest, NextResponse } from "next/server";
-import { createChatEngine } from "./engine";
-import { LlamaIndexStream } from "./llamaindex-stream";
-
-export const runtime = "nodejs";
-export const dynamic = "force-dynamic";
-
-const convertMessageContent = (
-  textMessage: string,
-  imageUrl: string | undefined,
-): MessageContent => {
-  if (!imageUrl) return textMessage;
-  return [
-    {
-      type: "text",
-      text: textMessage,
-    },
-    {
-      type: "image_url",
-      image_url: {
-        url: imageUrl,
-      },
-    },
-  ];
-};
-
-export async function POST(request: NextRequest) {
-  try {
-    const body = await request.json();
-    const { messages, data }: { messages: ChatMessage[]; data: any } = body;
-    const userMessage = messages?.pop(); // guard: messages may be missing from the request body
-    if (!messages || !userMessage || userMessage.role !== "user") {
-      return NextResponse.json(
-        {
-          error:
-            "messages are required in the request body and the last message must be from the user",
-        },
-        { status: 400 },
-      );
-    }
-
-    const llm = new OpenAI({
-      model: (process.env.MODEL as any) ?? "gpt-3.5-turbo",
-      maxTokens: 512,
-    });
-
-    const chatEngine = await createChatEngine(llm);
-
-    // Convert message content from Vercel/AI format to LlamaIndex/OpenAI format
-    const userMessageContent = convertMessageContent(
-      userMessage.content,
-      data?.imageUrl,
-    );
-
-    // Calling LlamaIndex's ChatEngine to get a streamed response
-    const response = await chatEngine.chat({
-      message: userMessageContent,
-      chatHistory: messages,
-      stream: true,
-    });
-
-    // Transform LlamaIndex stream to Vercel/AI format
-    const { stream, data: streamData } = LlamaIndexStream(response, {
-      parserOptions: {
-        image_url: data?.imageUrl,
-      },
-    });
-
-    // Return a StreamingTextResponse, which can be consumed by the Vercel/AI client
-    return new StreamingTextResponse(stream, {}, streamData);
-  } catch (error) {
-    console.error("[LlamaIndex]", error);
-    return NextResponse.json(
-      {
-        error: (error as Error).message,
-      },
-      {
-        status: 500,
-      },
-    );
-  }
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/chat-section.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/chat-section.tsx
deleted file mode 100644
index 08afc25487999ad9809aa01c9f95c726c39db374..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/chat-section.tsx
+++ /dev/null
@@ -1,46 +0,0 @@
-"use client";
-
-import { useChat } from "ai/react";
-import { useMemo } from "react";
-import { insertDataIntoMessages } from "./transform";
-import { ChatInput, ChatMessages } from "./ui/chat";
-
-export default function ChatSection() {
-  const {
-    messages,
-    input,
-    isLoading,
-    handleSubmit,
-    handleInputChange,
-    reload,
-    stop,
-    data,
-  } = useChat({
-    api: process.env.NEXT_PUBLIC_CHAT_API,
-    headers: {
-      "Content-Type": "application/json", // using JSON because of vercel/ai 2.2.26
-    },
-  });
-
-  const transformedMessages = useMemo(() => {
-    return insertDataIntoMessages(messages, data);
-  }, [messages, data]);
-
-  return (
-    <div className="space-y-4 max-w-5xl w-full">
-      <ChatMessages
-        messages={transformedMessages}
-        isLoading={isLoading}
-        reload={reload}
-        stop={stop}
-      />
-      <ChatInput
-        input={input}
-        handleSubmit={handleSubmit}
-        handleInputChange={handleInputChange}
-        isLoading={isLoading}
-        multiModal={process.env.NEXT_PUBLIC_MODEL === "gpt-4-vision-preview"}
-      />
-    </div>
-  );
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/header.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/header.tsx
deleted file mode 100644
index 2b0e488f769eff6700a282c2a6a77dd8d0a4dac8..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/header.tsx
+++ /dev/null
@@ -1,28 +0,0 @@
-import Image from "next/image";
-
-export default function Header() {
-  return (
-    <div className="z-10 max-w-5xl w-full items-center justify-between font-mono text-sm lg:flex">
-      <p className="fixed left-0 top-0 flex w-full justify-center border-b border-gray-300 bg-gradient-to-b from-zinc-200 pb-6 pt-8 backdrop-blur-2xl dark:border-neutral-800 dark:bg-zinc-800/30 dark:from-inherit lg:static lg:w-auto  lg:rounded-xl lg:border lg:bg-gray-200 lg:p-4 lg:dark:bg-zinc-800/30">
-        Get started by editing&nbsp;
-        <code className="font-mono font-bold">app/page.tsx</code>
-      </p>
-      <div className="fixed bottom-0 left-0 flex h-48 w-full items-end justify-center bg-gradient-to-t from-white via-white dark:from-black dark:via-black lg:static lg:h-auto lg:w-auto lg:bg-none">
-        <a
-          href="https://www.llamaindex.ai/"
-          className="flex items-center justify-center font-nunito text-lg font-bold gap-2"
-        >
-          <span>Built by LlamaIndex</span>
-          <Image
-            className="rounded-xl"
-            src="/llama.png"
-            alt="Llama Logo"
-            width={40}
-            height={40}
-            priority
-          />
-        </a>
-      </div>
-    </div>
-  );
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/transform.ts b/packages/create-llama/templates/types/streaming/nextjs/app/components/transform.ts
deleted file mode 100644
index 5af8fb3cba4845331ecf16ca7c4bf223606e5644..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/transform.ts
+++ /dev/null
@@ -1,19 +0,0 @@
-import { JSONValue, Message } from "ai";
-
-export const isValidMessageData = (rawData: JSONValue | undefined) => {
-  if (!rawData || typeof rawData !== "object") return false;
-  if (Object.keys(rawData).length === 0) return false;
-  return true;
-};
-
-export const insertDataIntoMessages = (
-  messages: Message[],
-  data: JSONValue[] | undefined,
-) => {
-  if (!data) return messages;
-  messages.forEach((message, i) => {
-    const rawData = data[i];
-    if (isValidMessageData(rawData)) message.data = rawData;
-  });
-  return messages;
-};
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/README-template.md b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/README-template.md
deleted file mode 100644
index ebfcf48c999ef8e040b282adfd4312767f731bb5..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/README-template.md
+++ /dev/null
@@ -1 +0,0 @@
-Using the chat component from https://github.com/marcusschiesser/ui (based on https://ui.shadcn.com/)
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/button.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/button.tsx
deleted file mode 100644
index 662b0404d83445eb7fca1ead6724e944610fdf25..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/button.tsx
+++ /dev/null
@@ -1,56 +0,0 @@
-import { Slot } from "@radix-ui/react-slot";
-import { cva, type VariantProps } from "class-variance-authority";
-import * as React from "react";
-
-import { cn } from "./lib/utils";
-
-const buttonVariants = cva(
-  "inline-flex items-center justify-center whitespace-nowrap rounded-md text-sm font-medium ring-offset-background transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50",
-  {
-    variants: {
-      variant: {
-        default: "bg-primary text-primary-foreground hover:bg-primary/90",
-        destructive:
-          "bg-destructive text-destructive-foreground hover:bg-destructive/90",
-        outline:
-          "border border-input bg-background hover:bg-accent hover:text-accent-foreground",
-        secondary:
-          "bg-secondary text-secondary-foreground hover:bg-secondary/80",
-        ghost: "hover:bg-accent hover:text-accent-foreground",
-        link: "text-primary underline-offset-4 hover:underline",
-      },
-      size: {
-        default: "h-10 px-4 py-2",
-        sm: "h-9 rounded-md px-3",
-        lg: "h-11 rounded-md px-8",
-        icon: "h-10 w-10",
-      },
-    },
-    defaultVariants: {
-      variant: "default",
-      size: "default",
-    },
-  },
-);
-
-export interface ButtonProps
-  extends React.ButtonHTMLAttributes<HTMLButtonElement>,
-    VariantProps<typeof buttonVariants> {
-  asChild?: boolean;
-}
-
-const Button = React.forwardRef<HTMLButtonElement, ButtonProps>(
-  ({ className, variant, size, asChild = false, ...props }, ref) => {
-    const Comp = asChild ? Slot : "button";
-    return (
-      <Comp
-        className={cn(buttonVariants({ variant, size, className }))}
-        ref={ref}
-        {...props}
-      />
-    );
-  },
-);
-Button.displayName = "Button";
-
-export { Button, buttonVariants };
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-actions.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-actions.tsx
deleted file mode 100644
index 151ef61a945c49cacfd44c77cbdf7287b5967861..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-actions.tsx
+++ /dev/null
@@ -1,28 +0,0 @@
-import { PauseCircle, RefreshCw } from "lucide-react";
-
-import { Button } from "../button";
-import { ChatHandler } from "./chat.interface";
-
-export default function ChatActions(
-  props: Pick<ChatHandler, "stop" | "reload"> & {
-    showReload?: boolean;
-    showStop?: boolean;
-  },
-) {
-  return (
-    <div className="space-x-4">
-      {props.showStop && (
-        <Button variant="outline" size="sm" onClick={props.stop}>
-          <PauseCircle className="mr-2 h-4 w-4" />
-          Stop generating
-        </Button>
-      )}
-      {props.showReload && (
-        <Button variant="outline" size="sm" onClick={props.reload}>
-          <RefreshCw className="mr-2 h-4 w-4" />
-          Regenerate
-        </Button>
-      )}
-    </div>
-  );
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-avatar.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-avatar.tsx
deleted file mode 100644
index ce04e306a7164e49e7ea6950a55c4f5cedc2ee2a..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-avatar.tsx
+++ /dev/null
@@ -1,25 +0,0 @@
-import { User2 } from "lucide-react";
-import Image from "next/image";
-
-export default function ChatAvatar({ role }: { role: string }) {
-  if (role === "user") {
-    return (
-      <div className="flex h-8 w-8 shrink-0 select-none items-center justify-center rounded-md border bg-background shadow">
-        <User2 className="h-4 w-4" />
-      </div>
-    );
-  }
-
-  return (
-    <div className="flex h-8 w-8 shrink-0 select-none items-center justify-center rounded-md border bg-black text-white shadow">
-      <Image
-        className="rounded-md"
-        src="/llama.png"
-        alt="Llama Logo"
-        width={24}
-        height={24}
-        priority
-      />
-    </div>
-  );
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-input.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-input.tsx
deleted file mode 100644
index 435637e5ec94fdb9fe03faa3c3e1791a0be584bb..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-input.tsx
+++ /dev/null
@@ -1,84 +0,0 @@
-import { useState } from "react";
-import { Button } from "../button";
-import FileUploader from "../file-uploader";
-import { Input } from "../input";
-import UploadImagePreview from "../upload-image-preview";
-import { ChatHandler } from "./chat.interface";
-
-export default function ChatInput(
-  props: Pick<
-    ChatHandler,
-    | "isLoading"
-    | "input"
-    | "onFileUpload"
-    | "onFileError"
-    | "handleSubmit"
-    | "handleInputChange"
-  > & {
-    multiModal?: boolean;
-  },
-) {
-  const [imageUrl, setImageUrl] = useState<string | null>(null);
-
-  const onSubmit = (e: React.FormEvent<HTMLFormElement>) => {
-    if (imageUrl) {
-      props.handleSubmit(e, {
-        data: { imageUrl: imageUrl },
-      });
-      setImageUrl(null);
-      return;
-    }
-    props.handleSubmit(e);
-  };
-
-  const onRemovePreviewImage = () => setImageUrl(null);
-
-  const handleUploadImageFile = async (file: File) => {
-    const base64 = await new Promise<string>((resolve, reject) => {
-      const reader = new FileReader();
-      reader.readAsDataURL(file);
-      reader.onload = () => resolve(reader.result as string);
-      reader.onerror = (error) => reject(error);
-    });
-    setImageUrl(base64);
-  };
-
-  const handleUploadFile = async (file: File) => {
-    try {
-      if (props.multiModal && file.type.startsWith("image/")) {
-        return await handleUploadImageFile(file);
-      }
-      props.onFileUpload?.(file);
-    } catch (error: any) {
-      props.onFileError?.(error.message);
-    }
-  };
-
-  return (
-    <form
-      onSubmit={onSubmit}
-      className="rounded-xl bg-white p-4 shadow-xl space-y-4"
-    >
-      {imageUrl && (
-        <UploadImagePreview url={imageUrl} onRemove={onRemovePreviewImage} />
-      )}
-      <div className="flex w-full items-start justify-between gap-4 ">
-        <Input
-          autoFocus
-          name="message"
-          placeholder="Type a message"
-          className="flex-1"
-          value={props.input}
-          onChange={props.handleInputChange}
-        />
-        <FileUploader
-          onFileUpload={handleUploadFile}
-          onFileError={props.onFileError}
-        />
-        <Button type="submit" disabled={props.isLoading}>
-          Send message
-        </Button>
-      </div>
-    </form>
-  );
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-message.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-message.tsx
deleted file mode 100644
index 808d9b08019cd7cb0e8703fdf49c2cfd78625dbf..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-message.tsx
+++ /dev/null
@@ -1,64 +0,0 @@
-import { Check, Copy } from "lucide-react";
-
-import { JSONValue, Message } from "ai";
-import Image from "next/image";
-import { Button } from "../button";
-import ChatAvatar from "./chat-avatar";
-import Markdown from "./markdown";
-import { useCopyToClipboard } from "./use-copy-to-clipboard";
-
-interface ChatMessageImageData {
-  type: "image_url";
-  image_url: {
-    url: string;
-  };
-}
-
-// This component will parse message data and render the appropriate UI.
-function ChatMessageData({ messageData }: { messageData: JSONValue }) {
-  const { image_url, type } = messageData as unknown as ChatMessageImageData;
-  if (type === "image_url") {
-    return (
-      <div className="rounded-md max-w-[200px] shadow-md">
-        <Image
-          src={image_url.url}
-          width={0}
-          height={0}
-          sizes="100vw"
-          style={{ width: "100%", height: "auto" }}
-          alt=""
-        />
-      </div>
-    );
-  }
-  return null;
-}
-
-export default function ChatMessage(chatMessage: Message) {
-  const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 });
-  return (
-    <div className="flex items-start gap-4 pr-5 pt-5">
-      <ChatAvatar role={chatMessage.role} />
-      <div className="group flex flex-1 justify-between gap-2">
-        <div className="flex-1 space-y-4">
-          {chatMessage.data && (
-            <ChatMessageData messageData={chatMessage.data} />
-          )}
-          <Markdown content={chatMessage.content} />
-        </div>
-        <Button
-          onClick={() => copyToClipboard(chatMessage.content)}
-          size="icon"
-          variant="ghost"
-          className="h-8 w-8 opacity-0 group-hover:opacity-100"
-        >
-          {isCopied ? (
-            <Check className="h-4 w-4" />
-          ) : (
-            <Copy className="h-4 w-4" />
-          )}
-        </Button>
-      </div>
-    </div>
-  );
-}
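The `data` payload that `ChatMessageData` understands mirrors the `ChatMessageImageData` interface above. A sketch of a message that would render with an image attachment, assuming the `ai` package's `Message` type carries the optional `data` field used here:

import { Message } from "ai";

const messageWithImage: Message = {
  id: "42",
  role: "user",
  content: "What is in this picture?",
  // Parsed by ChatMessageData and rendered as a Next.js <Image>.
  data: { type: "image_url", image_url: { url: "/llama.png" } },
};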
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-messages.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-messages.tsx
deleted file mode 100644
index abc3e52d7d047c769a58c3cecd404b86a7922a7d..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat-messages.tsx
+++ /dev/null
@@ -1,62 +0,0 @@
-import { Loader2 } from "lucide-react";
-import { useEffect, useRef } from "react";
-
-import ChatActions from "./chat-actions";
-import ChatMessage from "./chat-message";
-import { ChatHandler } from "./chat.interface";
-
-export default function ChatMessages(
-  props: Pick<ChatHandler, "messages" | "isLoading" | "reload" | "stop">,
-) {
-  const scrollableChatContainerRef = useRef<HTMLDivElement>(null);
-  const messageLength = props.messages.length;
-  const lastMessage = props.messages[messageLength - 1];
-
-  const scrollToBottom = () => {
-    if (scrollableChatContainerRef.current) {
-      scrollableChatContainerRef.current.scrollTop =
-        scrollableChatContainerRef.current.scrollHeight;
-    }
-  };
-
-  const isLastMessageFromAssistant =
-    messageLength > 0 && lastMessage?.role !== "user";
-  const showReload =
-    props.reload && !props.isLoading && isLastMessageFromAssistant;
-  const showStop = props.stop && props.isLoading;
-
-  // `isPending` indicates that the stream response
-  // has not yet been received from the server,
-  // so we show a loading indicator for a better UX.
-  const isPending = props.isLoading && !isLastMessageFromAssistant;
-
-  useEffect(() => {
-    scrollToBottom();
-  }, [messageLength, lastMessage]);
-
-  return (
-    <div className="w-full rounded-xl bg-white p-4 shadow-xl pb-0">
-      <div
-        className="flex h-[50vh] flex-col gap-5 divide-y overflow-y-auto pb-4"
-        ref={scrollableChatContainerRef}
-      >
-        {props.messages.map((m) => (
-          <ChatMessage key={m.id} {...m} />
-        ))}
-        {isPending && (
-          <div className="flex justify-center items-center pt-10">
-            <Loader2 className="h-4 w-4 animate-spin" />
-          </div>
-        )}
-      </div>
-      <div className="flex justify-end py-4">
-        <ChatActions
-          reload={props.reload}
-          stop={props.stop}
-          showReload={showReload}
-          showStop={showStop}
-        />
-      </div>
-    </div>
-  );
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat.interface.ts b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat.interface.ts
deleted file mode 100644
index 5b9f22539c97428160f22c459125def2ad1b61fe..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/chat.interface.ts
+++ /dev/null
@@ -1,18 +0,0 @@
-import { Message } from "ai";
-
-export interface ChatHandler {
-  messages: Message[];
-  input: string;
-  isLoading: boolean;
-  handleSubmit: (
-    e: React.FormEvent<HTMLFormElement>,
-    ops?: {
-      data?: any;
-    },
-  ) => void;
-  handleInputChange: (e: React.ChangeEvent<HTMLInputElement>) => void;
-  reload?: () => void;
-  stop?: () => void;
-  onFileUpload?: (file: File) => Promise<void>;
-  onFileError?: (errMsg: string) => void;
-}
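Any object satisfying `ChatHandler` can drive the chat UI. A hypothetical in-memory implementation (the `useLocalChat` name is illustrative, not part of the template), useful for rendering `<ChatMessages />` and `<ChatInput />` without a backend:

import { useState } from "react";
import { Message } from "ai";
import { ChatHandler } from "./chat.interface";

export function useLocalChat(): ChatHandler {
  const [messages, setMessages] = useState<Message[]>([]);
  const [input, setInput] = useState("");

  return {
    messages,
    input,
    isLoading: false,
    handleInputChange: (e) => setInput(e.target.value),
    handleSubmit: (e) => {
      e.preventDefault();
      if (!input) return;
      // Append the user's message; a real handler would also stream a reply.
      setMessages((prev) => [
        ...prev,
        { id: String(prev.length), role: "user", content: input },
      ]);
      setInput("");
    },
  };
}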
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/codeblock.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/codeblock.tsx
deleted file mode 100644
index 014a0fc3afb58366508bf2ab5d4314b6bfc4abc7..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/codeblock.tsx
+++ /dev/null
@@ -1,139 +0,0 @@
-"use client";
-
-import { Check, Copy, Download } from "lucide-react";
-import { FC, memo } from "react";
-import { Prism, SyntaxHighlighterProps } from "react-syntax-highlighter";
-import { coldarkDark } from "react-syntax-highlighter/dist/cjs/styles/prism";
-
-import { Button } from "../button";
-import { useCopyToClipboard } from "./use-copy-to-clipboard";
-
-// TODO: Remove this when @types/react-syntax-highlighter is updated
-const SyntaxHighlighter = Prism as unknown as FC<SyntaxHighlighterProps>;
-
-interface Props {
-  language: string;
-  value: string;
-}
-
-interface LanguageMap {
-  [key: string]: string | undefined;
-}
-
-export const programmingLanguages: LanguageMap = {
-  javascript: ".js",
-  python: ".py",
-  java: ".java",
-  c: ".c",
-  cpp: ".cpp",
-  "c++": ".cpp",
-  "c#": ".cs",
-  ruby: ".rb",
-  php: ".php",
-  swift: ".swift",
-  "objective-c": ".m",
-  kotlin: ".kt",
-  typescript: ".ts",
-  go: ".go",
-  perl: ".pl",
-  rust: ".rs",
-  scala: ".scala",
-  haskell: ".hs",
-  lua: ".lua",
-  shell: ".sh",
-  sql: ".sql",
-  html: ".html",
-  css: ".css",
-  // add more file extensions here; make sure the key is the same as the language prop of the CodeBlock component
-};
-
-export const generateRandomString = (length: number, lowercase = false) => {
-  const chars = "ABCDEFGHJKLMNPQRSTUVWXY3456789"; // excluding similar looking characters like Z, 2, I, 1, O, 0
-  let result = "";
-  for (let i = 0; i < length; i++) {
-    result += chars.charAt(Math.floor(Math.random() * chars.length));
-  }
-  return lowercase ? result.toLowerCase() : result;
-};
-
-const CodeBlock: FC<Props> = memo(({ language, value }) => {
-  const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 });
-
-  const downloadAsFile = () => {
-    if (typeof window === "undefined") {
-      return;
-    }
-    const fileExtension = programmingLanguages[language] || ".file";
-    const suggestedFileName = `file-${generateRandomString(
-      3,
-      true,
-    )}${fileExtension}`;
-    const fileName = window.prompt("Enter file name", suggestedFileName);
-
-    if (!fileName) {
-      // User pressed cancel on prompt.
-      return;
-    }
-
-    const blob = new Blob([value], { type: "text/plain" });
-    const url = URL.createObjectURL(blob);
-    const link = document.createElement("a");
-    link.download = fileName;
-    link.href = url;
-    link.style.display = "none";
-    document.body.appendChild(link);
-    link.click();
-    document.body.removeChild(link);
-    URL.revokeObjectURL(url);
-  };
-
-  const onCopy = () => {
-    if (isCopied) return;
-    copyToClipboard(value);
-  };
-
-  return (
-    <div className="codeblock relative w-full bg-zinc-950 font-sans">
-      <div className="flex w-full items-center justify-between bg-zinc-800 px-6 py-2 pr-4 text-zinc-100">
-        <span className="text-xs lowercase">{language}</span>
-        <div className="flex items-center space-x-1">
-          <Button variant="ghost" onClick={downloadAsFile} size="icon">
-            <Download />
-            <span className="sr-only">Download</span>
-          </Button>
-          <Button variant="ghost" size="icon" onClick={onCopy}>
-            {isCopied ? (
-              <Check className="h-4 w-4" />
-            ) : (
-              <Copy className="h-4 w-4" />
-            )}
-            <span className="sr-only">Copy code</span>
-          </Button>
-        </div>
-      </div>
-      <SyntaxHighlighter
-        language={language}
-        style={coldarkDark}
-        PreTag="div"
-        showLineNumbers
-        customStyle={{
-          width: "100%",
-          background: "transparent",
-          padding: "1.5rem 1rem",
-          borderRadius: "0.5rem",
-        }}
-        codeTagProps={{
-          style: {
-            fontSize: "0.9rem",
-            fontFamily: "var(--font-mono)",
-          },
-        }}
-      >
-        {value}
-      </SyntaxHighlighter>
-    </div>
-  );
-});
-CodeBlock.displayName = "CodeBlock";
-
-export { CodeBlock };
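Outside of the markdown renderer, the component can be used standalone; it only needs the two props declared in `Props`:

import { CodeBlock } from "./codeblock";

export default function Example() {
  return <CodeBlock language="python" value={'print("hello, llama")'} />;
}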
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/index.ts b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/index.ts
deleted file mode 100644
index 112ef39a8bdf7ef1f3ed3183eb7bc596ab596362..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/index.ts
+++ /dev/null
@@ -1,5 +0,0 @@
-import ChatInput from "./chat-input";
-import ChatMessages from "./chat-messages";
-
-export { type ChatHandler } from "./chat.interface";
-export { ChatInput, ChatMessages };
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/markdown.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/markdown.tsx
deleted file mode 100644
index 3ca538051d69aac03135b367c8d8a7d01d7be592..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/markdown.tsx
+++ /dev/null
@@ -1,59 +0,0 @@
-import { FC, memo } from "react";
-import ReactMarkdown, { Options } from "react-markdown";
-import remarkGfm from "remark-gfm";
-import remarkMath from "remark-math";
-
-import { CodeBlock } from "./codeblock";
-
-const MemoizedReactMarkdown: FC<Options> = memo(
-  ReactMarkdown,
-  (prevProps, nextProps) =>
-    prevProps.children === nextProps.children &&
-    prevProps.className === nextProps.className,
-);
-
-export default function Markdown({ content }: { content: string }) {
-  return (
-    <MemoizedReactMarkdown
-      className="prose dark:prose-invert prose-p:leading-relaxed prose-pre:p-0 break-words"
-      remarkPlugins={[remarkGfm, remarkMath]}
-      components={{
-        p({ children }) {
-          return <p className="mb-2 last:mb-0">{children}</p>;
-        },
-        code({ node, inline, className, children, ...props }) {
-          if (children.length) {
-            if (children[0] == "▍") {
-              return (
-                <span className="mt-1 animate-pulse cursor-default">▍</span>
-              );
-            }
-
-            children[0] = (children[0] as string).replace("`▍`", "▍");
-          }
-
-          const match = /language-(\w+)/.exec(className || "");
-
-          if (inline) {
-            return (
-              <code className={className} {...props}>
-                {children}
-              </code>
-            );
-          }
-
-          return (
-            <CodeBlock
-              key={Math.random()}
-              language={(match && match[1]) || ""}
-              value={String(children).replace(/\n$/, "")}
-              {...props}
-            />
-          );
-        },
-      }}
-    >
-      {content}
-    </MemoizedReactMarkdown>
-  );
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/use-copy-to-clipboard.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/use-copy-to-clipboard.tsx
deleted file mode 100644
index e011d69bdc2a3c0e2a2c187fa29b00f429e1c947..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/chat/use-copy-to-clipboard.tsx
+++ /dev/null
@@ -1,33 +0,0 @@
-"use client";
-
-import * as React from "react";
-
-export interface useCopyToClipboardProps {
-  timeout?: number;
-}
-
-export function useCopyToClipboard({
-  timeout = 2000,
-}: useCopyToClipboardProps) {
-  const [isCopied, setIsCopied] = React.useState<boolean>(false);
-
-  const copyToClipboard = (value: string) => {
-    if (typeof window === "undefined" || !navigator.clipboard?.writeText) {
-      return;
-    }
-
-    if (!value) {
-      return;
-    }
-
-    navigator.clipboard.writeText(value).then(() => {
-      setIsCopied(true);
-
-      setTimeout(() => {
-        setIsCopied(false);
-      }, timeout);
-    });
-  };
-
-  return { isCopied, copyToClipboard };
-}
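A minimal usage sketch of the hook, following the same pattern `ChatMessage` and `CodeBlock` use above: `isCopied` flips to true after a successful copy and resets after `timeout` milliseconds:

import { useCopyToClipboard } from "./use-copy-to-clipboard";

export default function CopyButton({ text }: { text: string }) {
  const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 });
  return (
    <button onClick={() => copyToClipboard(text)}>
      {isCopied ? "Copied!" : "Copy"}
    </button>
  );
}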
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/file-uploader.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/file-uploader.tsx
deleted file mode 100644
index e42a267d18cbe76391d1decad0acca8fdf4dc295..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/file-uploader.tsx
+++ /dev/null
@@ -1,105 +0,0 @@
-"use client";
-
-import { Loader2, Paperclip } from "lucide-react";
-import { ChangeEvent, useState } from "react";
-import { buttonVariants } from "./button";
-import { cn } from "./lib/utils";
-
-export interface FileUploaderProps {
-  config?: {
-    inputId?: string;
-    fileSizeLimit?: number;
-    allowedExtensions?: string[];
-    checkExtension?: (extension: string) => string | null;
-    disabled?: boolean;
-  };
-  onFileUpload: (file: File) => Promise<void>;
-  onFileError?: (errMsg: string) => void;
-}
-
-const DEFAULT_INPUT_ID = "fileInput";
-const DEFAULT_FILE_SIZE_LIMIT = 1024 * 1024 * 50; // 50 MB
-
-export default function FileUploader({
-  config,
-  onFileUpload,
-  onFileError,
-}: FileUploaderProps) {
-  const [uploading, setUploading] = useState(false);
-
-  const inputId = config?.inputId || DEFAULT_INPUT_ID;
-  const fileSizeLimit = config?.fileSizeLimit || DEFAULT_FILE_SIZE_LIMIT;
-  const allowedExtensions = config?.allowedExtensions;
-  const defaultCheckExtension = (extension: string) => {
-    if (allowedExtensions && !allowedExtensions.includes(extension)) {
-      return `Invalid file type. Please select a file with one of these formats: ${allowedExtensions!.join(
-        ",",
-      )}`;
-    }
-    return null;
-  };
-  const checkExtension = config?.checkExtension ?? defaultCheckExtension;
-
-  const isFileSizeExceeded = (file: File) => {
-    return file.size > fileSizeLimit;
-  };
-
-  const resetInput = () => {
-    const fileInput = document.getElementById(inputId) as HTMLInputElement;
-    fileInput.value = "";
-  };
-
-  const onFileChange = async (e: ChangeEvent<HTMLInputElement>) => {
-    const file = e.target.files?.[0];
-    if (!file) return;
-
-    setUploading(true);
-    await handleUpload(file);
-    resetInput();
-    setUploading(false);
-  };
-
-  const handleUpload = async (file: File) => {
-    const onFileUploadError = onFileError || window.alert;
-    const fileExtension = file.name.split(".").pop() || "";
-    const extensionFileError = checkExtension(fileExtension);
-    if (extensionFileError) {
-      return onFileUploadError(extensionFileError);
-    }
-
-    if (isFileSizeExceeded(file)) {
-      return onFileUploadError(
-        `File size exceeded. Limit is ${fileSizeLimit / 1024 / 1024} MB`,
-      );
-    }
-
-    await onFileUpload(file);
-  };
-
-  return (
-    <div className="self-stretch">
-      <input
-        type="file"
-        id={inputId}
-        style={{ display: "none" }}
-        onChange={onFileChange}
-        accept={allowedExtensions?.join(",")}
-        disabled={config?.disabled || uploading}
-      />
-      <label
-        htmlFor={inputId}
-        className={cn(
-          buttonVariants({ variant: "secondary", size: "icon" }),
-          "cursor-pointer",
-          uploading && "opacity-50",
-        )}
-      >
-        {uploading ? (
-          <Loader2 className="h-4 w-4 animate-spin" />
-        ) : (
-          <Paperclip className="-rotate-45 w-4 h-4" />
-        )}
-      </label>
-    </div>
-  );
-}
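The optional `config` prop controls the extension and size checks; note that `allowedExtensions` entries are compared against the file name's suffix without a leading dot. A sketch with illustrative values:

<FileUploader
  config={{
    allowedExtensions: ["csv", "pdf"],
    fileSizeLimit: 1024 * 1024 * 5, // 5 MB instead of the 50 MB default
    disabled: false,
  }}
  onFileUpload={async (file) => {
    console.log("uploading", file.name);
  }}
  onFileError={(msg) => console.error(msg)}
/>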
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/input.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/input.tsx
deleted file mode 100644
index edfa129e623cca64fa706d7750e3635fecd1d628..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/input.tsx
+++ /dev/null
@@ -1,25 +0,0 @@
-import * as React from "react";
-
-import { cn } from "./lib/utils";
-
-export interface InputProps
-  extends React.InputHTMLAttributes<HTMLInputElement> {}
-
-const Input = React.forwardRef<HTMLInputElement, InputProps>(
-  ({ className, type, ...props }, ref) => {
-    return (
-      <input
-        type={type}
-        className={cn(
-          "flex h-10 w-full rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background file:border-0 file:bg-transparent file:text-sm file:font-medium placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50",
-          className,
-        )}
-        ref={ref}
-        {...props}
-      />
-    );
-  },
-);
-Input.displayName = "Input";
-
-export { Input };
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/lib/utils.ts b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/lib/utils.ts
deleted file mode 100644
index a5ef193506d07d0459fec4f187af08283094d7c8..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/lib/utils.ts
+++ /dev/null
@@ -1,6 +0,0 @@
-import { clsx, type ClassValue } from "clsx";
-import { twMerge } from "tailwind-merge";
-
-export function cn(...inputs: ClassValue[]) {
-  return twMerge(clsx(inputs));
-}
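`cn` combines clsx's conditional class handling with tailwind-merge's conflict resolution, so later Tailwind utilities override earlier ones:

import { cn } from "./lib/utils";

// clsx drops the falsy entry; twMerge resolves the p-2/p-4 conflict.
cn("p-2 text-sm", false && "hidden", "p-4"); // => "text-sm p-4"

This is why the components above can safely append a caller-supplied `className` after their own defaults.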
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/upload-image-preview.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/upload-image-preview.tsx
deleted file mode 100644
index 55ef6e9c2793ef4eb935422a9eedbfdb611a2304..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/ui/upload-image-preview.tsx
+++ /dev/null
@@ -1,32 +0,0 @@
-import { XCircleIcon } from "lucide-react";
-import Image from "next/image";
-import { cn } from "./lib/utils";
-
-export default function UploadImagePreview({
-  url,
-  onRemove,
-}: {
-  url: string;
-  onRemove: () => void;
-}) {
-  return (
-    <div className="relative w-20 h-20 group">
-      <Image
-        src={url}
-        alt="Uploaded image"
-        fill
-        className="object-cover w-full h-full rounded-xl hover:brightness-75"
-      />
-      <div
-        className={cn(
-          "absolute -top-2 -right-2 w-6 h-6 z-10 bg-gray-500 text-white rounded-full hidden group-hover:block",
-        )}
-      >
-        <XCircleIcon
-          className="w-6 h-6 bg-gray-500 text-white rounded-full"
-          onClick={onRemove}
-        />
-      </div>
-    </div>
-  );
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/favicon.ico b/packages/create-llama/templates/types/streaming/nextjs/app/favicon.ico
deleted file mode 100644
index a1eaef62f2dfa895f1bbffc6595bb53d9604963e..0000000000000000000000000000000000000000
Binary files a/packages/create-llama/templates/types/streaming/nextjs/app/favicon.ico and /dev/null differ
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/globals.css b/packages/create-llama/templates/types/streaming/nextjs/app/globals.css
deleted file mode 100644
index 09b85ed2c912e25518ddebbfebaba69090f889f4..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/globals.css
+++ /dev/null
@@ -1,94 +0,0 @@
-@tailwind base;
-@tailwind components;
-@tailwind utilities;
-
-@layer base {
-  :root {
-    --background: 0 0% 100%;
-    --foreground: 222.2 47.4% 11.2%;
-
-    --muted: 210 40% 96.1%;
-    --muted-foreground: 215.4 16.3% 46.9%;
-
-    --popover: 0 0% 100%;
-    --popover-foreground: 222.2 47.4% 11.2%;
-
-    --border: 214.3 31.8% 91.4%;
-    --input: 214.3 31.8% 91.4%;
-
-    --card: 0 0% 100%;
-    --card-foreground: 222.2 47.4% 11.2%;
-
-    --primary: 222.2 47.4% 11.2%;
-    --primary-foreground: 210 40% 98%;
-
-    --secondary: 210 40% 96.1%;
-    --secondary-foreground: 222.2 47.4% 11.2%;
-
-    --accent: 210 40% 96.1%;
-    --accent-foreground: 222.2 47.4% 11.2%;
-
-    --destructive: 0 100% 50%;
-    --destructive-foreground: 210 40% 98%;
-
-    --ring: 215 20.2% 65.1%;
-
-    --radius: 0.5rem;
-  }
-
-  .dark {
-    --background: 224 71% 4%;
-    --foreground: 213 31% 91%;
-
-    --muted: 223 47% 11%;
-    --muted-foreground: 215.4 16.3% 56.9%;
-
-    --accent: 216 34% 17%;
-    --accent-foreground: 210 40% 98%;
-
-    --popover: 224 71% 4%;
-    --popover-foreground: 215 20.2% 65.1%;
-
-    --border: 216 34% 17%;
-    --input: 216 34% 17%;
-
-    --card: 224 71% 4%;
-    --card-foreground: 213 31% 91%;
-
-    --primary: 210 40% 98%;
-    --primary-foreground: 222.2 47.4% 1.2%;
-
-    --secondary: 222.2 47.4% 11.2%;
-    --secondary-foreground: 210 40% 98%;
-
-    --destructive: 0 63% 31%;
-    --destructive-foreground: 210 40% 98%;
-
-    --ring: 216 34% 17%;
-
-    --radius: 0.5rem;
-  }
-}
-
-@layer base {
-  * {
-    @apply border-border;
-  }
-  body {
-    @apply bg-background text-foreground;
-    font-feature-settings:
-      "rlig" 1,
-      "calt" 1;
-  }
-  .background-gradient {
-    background-color: #fff;
-    background-image: radial-gradient(
-        at 21% 11%,
-        rgba(186, 186, 233, 0.53) 0,
-        transparent 50%
-      ),
-      radial-gradient(at 85% 0, hsla(46, 57%, 78%, 0.52) 0, transparent 50%),
-      radial-gradient(at 91% 36%, rgba(194, 213, 255, 0.68) 0, transparent 50%),
-      radial-gradient(at 8% 40%, rgba(251, 218, 239, 0.46) 0, transparent 50%);
-  }
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/layout.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/layout.tsx
deleted file mode 100644
index fb097706274bdfe4690e4953bba24d7acea0a021..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/layout.tsx
+++ /dev/null
@@ -1,22 +0,0 @@
-import type { Metadata } from "next";
-import { Inter } from "next/font/google";
-import "./globals.css";
-
-const inter = Inter({ subsets: ["latin"] });
-
-export const metadata: Metadata = {
-  title: "Create Llama App",
-  description: "Generated by create-llama",
-};
-
-export default function RootLayout({
-  children,
-}: {
-  children: React.ReactNode;
-}) {
-  return (
-    <html lang="en">
-      <body className={inter.className}>{children}</body>
-    </html>
-  );
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/page.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/page.tsx
deleted file mode 100644
index ef00262b4a80049c70d66d1ceaced4afaabfd587..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/app/page.tsx
+++ /dev/null
@@ -1,11 +0,0 @@
-import Header from "@/app/components/header";
-import ChatSection from "./components/chat-section";
-
-export default function Home() {
-  return (
-    <main className="flex min-h-screen flex-col items-center gap-10 p-24 background-gradient">
-      <Header />
-      <ChatSection />
-    </main>
-  );
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/eslintrc.json b/packages/create-llama/templates/types/streaming/nextjs/eslintrc.json
deleted file mode 100644
index bffb357a7122523ec94045523758c4b825b448ef..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/eslintrc.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-  "extends": "next/core-web-vitals"
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/gitignore b/packages/create-llama/templates/types/streaming/nextjs/gitignore
deleted file mode 100644
index 8f322f0d8f49570a594b865ef8916c428a01afc1..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/gitignore
+++ /dev/null
@@ -1,35 +0,0 @@
-# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
-
-# dependencies
-/node_modules
-/.pnp
-.pnp.js
-
-# testing
-/coverage
-
-# next.js
-/.next/
-/out/
-
-# production
-/build
-
-# misc
-.DS_Store
-*.pem
-
-# debug
-npm-debug.log*
-yarn-debug.log*
-yarn-error.log*
-
-# local env files
-.env*.local
-
-# vercel
-.vercel
-
-# typescript
-*.tsbuildinfo
-next-env.d.ts
diff --git a/packages/create-llama/templates/types/streaming/nextjs/next-env.d.ts b/packages/create-llama/templates/types/streaming/nextjs/next-env.d.ts
deleted file mode 100644
index 4f11a03dc6cc37f2b5105c08f2e7b24c603ab2f4..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/next-env.d.ts
+++ /dev/null
@@ -1,5 +0,0 @@
-/// <reference types="next" />
-/// <reference types="next/image-types/global" />
-
-// NOTE: This file should not be edited
-// see https://nextjs.org/docs/basic-features/typescript for more information.
diff --git a/packages/create-llama/templates/types/streaming/nextjs/next.config.json b/packages/create-llama/templates/types/streaming/nextjs/next.config.json
deleted file mode 100644
index 264e20ef32d0f82cadce202574ff63843479198a..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/next.config.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-  "experimental": {
-    "outputFileTracingIncludes": {
-      "/*": ["./cache/**/*"]
-    }
-  }
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/next.config.mjs b/packages/create-llama/templates/types/streaming/nextjs/next.config.mjs
deleted file mode 100644
index 124122bfaad262a8e291f9114971600670b0d02d..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/next.config.mjs
+++ /dev/null
@@ -1,8 +0,0 @@
-/** @type {import('next').NextConfig} */
-import fs from "fs";
-import webpack from "./webpack.config.mjs";
-
-const nextConfig = JSON.parse(fs.readFileSync("./next.config.json", "utf-8"));
-nextConfig.webpack = webpack;
-
-export default nextConfig;
diff --git a/packages/create-llama/templates/types/streaming/nextjs/package.json b/packages/create-llama/templates/types/streaming/nextjs/package.json
deleted file mode 100644
index 2f23029ddfba1cefcbf6e1243f19a85f1c2453d0..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/package.json
+++ /dev/null
@@ -1,43 +0,0 @@
-{
-  "name": "llama-index-nextjs-streaming",
-  "version": "1.0.0",
-  "scripts": {
-    "dev": "next dev",
-    "build": "next build",
-    "start": "next start",
-    "lint": "next lint"
-  },
-  "dependencies": {
-    "@radix-ui/react-slot": "^1.0.2",
-    "ai": "^2.2.27",
-    "class-variance-authority": "^0.7.0",
-    "clsx": "^1.2.1",
-    "dotenv": "^16.3.1",
-    "llamaindex": "0.0.37",
-    "lucide-react": "^0.294.0",
-    "next": "^14.0.3",
-    "react": "^18.2.0",
-    "react-dom": "^18.2.0",
-    "react-markdown": "^8.0.7",
-    "react-syntax-highlighter": "^15.5.0",
-    "remark": "^14.0.3",
-    "remark-code-import": "^1.2.0",
-    "remark-gfm": "^3.0.1",
-    "remark-math": "^5.1.1",
-    "supports-color": "^9.4.0",
-    "tailwind-merge": "^2.1.0"
-  },
-  "devDependencies": {
-    "@types/node": "^20.10.3",
-    "@types/react": "^18.2.42",
-    "@types/react-dom": "^18.2.17",
-    "autoprefixer": "^10.4.16",
-    "eslint": "^8.55.0",
-    "eslint-config-next": "^14.0.3",
-    "postcss": "^8.4.32",
-    "tailwindcss": "^3.3.6",
-    "typescript": "^5.3.2",
-    "@types/react-syntax-highlighter": "^15.5.11",
-    "cross-env": "^7.0.3"
-  }
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/postcss.config.js b/packages/create-llama/templates/types/streaming/nextjs/postcss.config.js
deleted file mode 100644
index 12a703d900da8159c30e75acbd2c4d87ae177f62..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/postcss.config.js
+++ /dev/null
@@ -1,6 +0,0 @@
-module.exports = {
-  plugins: {
-    tailwindcss: {},
-    autoprefixer: {},
-  },
-};
diff --git a/packages/create-llama/templates/types/streaming/nextjs/public/llama.png b/packages/create-llama/templates/types/streaming/nextjs/public/llama.png
deleted file mode 100644
index d4efba3b816bf765439c6d01b322b02684e946c3..0000000000000000000000000000000000000000
Binary files a/packages/create-llama/templates/types/streaming/nextjs/public/llama.png and /dev/null differ
diff --git a/packages/create-llama/templates/types/streaming/nextjs/tailwind.config.ts b/packages/create-llama/templates/types/streaming/nextjs/tailwind.config.ts
deleted file mode 100644
index aa5580affac868255fedb5a8ddc0dde7a105c454..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/tailwind.config.ts
+++ /dev/null
@@ -1,78 +0,0 @@
-import type { Config } from "tailwindcss";
-import { fontFamily } from "tailwindcss/defaultTheme";
-
-const config: Config = {
-  darkMode: ["class"],
-  content: ["app/**/*.{ts,tsx}", "components/**/*.{ts,tsx}"],
-  theme: {
-    container: {
-      center: true,
-      padding: "2rem",
-      screens: {
-        "2xl": "1400px",
-      },
-    },
-    extend: {
-      colors: {
-        border: "hsl(var(--border))",
-        input: "hsl(var(--input))",
-        ring: "hsl(var(--ring))",
-        background: "hsl(var(--background))",
-        foreground: "hsl(var(--foreground))",
-        primary: {
-          DEFAULT: "hsl(var(--primary))",
-          foreground: "hsl(var(--primary-foreground))",
-        },
-        secondary: {
-          DEFAULT: "hsl(var(--secondary))",
-          foreground: "hsl(var(--secondary-foreground))",
-        },
-        destructive: {
-          DEFAULT: "hsl(var(--destructive) / <alpha-value>)",
-          foreground: "hsl(var(--destructive-foreground) / <alpha-value>)",
-        },
-        muted: {
-          DEFAULT: "hsl(var(--muted))",
-          foreground: "hsl(var(--muted-foreground))",
-        },
-        accent: {
-          DEFAULT: "hsl(var(--accent))",
-          foreground: "hsl(var(--accent-foreground))",
-        },
-        popover: {
-          DEFAULT: "hsl(var(--popover))",
-          foreground: "hsl(var(--popover-foreground))",
-        },
-        card: {
-          DEFAULT: "hsl(var(--card))",
-          foreground: "hsl(var(--card-foreground))",
-        },
-      },
-      borderRadius: {
-        xl: `calc(var(--radius) + 4px)`,
-        lg: `var(--radius)`,
-        md: `calc(var(--radius) - 2px)`,
-        sm: "calc(var(--radius) - 4px)",
-      },
-      fontFamily: {
-        sans: ["var(--font-sans)", ...fontFamily.sans],
-      },
-      keyframes: {
-        "accordion-down": {
-          from: { height: "0" },
-          to: { height: "var(--radix-accordion-content-height)" },
-        },
-        "accordion-up": {
-          from: { height: "var(--radix-accordion-content-height)" },
-          to: { height: "0" },
-        },
-      },
-      animation: {
-        "accordion-down": "accordion-down 0.2s ease-out",
-        "accordion-up": "accordion-up 0.2s ease-out",
-      },
-    },
-  },
-  plugins: [],
-};
-export default config;
diff --git a/packages/create-llama/templates/types/streaming/nextjs/tsconfig.json b/packages/create-llama/templates/types/streaming/nextjs/tsconfig.json
deleted file mode 100644
index 40c136b8255d33d2f7335e82f141d9d16005d878..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/tsconfig.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
-  "compilerOptions": {
-    "target": "es5",
-    "lib": ["dom", "dom.iterable", "esnext"],
-    "allowJs": true,
-    "skipLibCheck": true,
-    "strict": true,
-    "noEmit": true,
-    "esModuleInterop": true,
-    "module": "esnext",
-    "moduleResolution": "bundler",
-    "resolveJsonModule": true,
-    "isolatedModules": true,
-    "jsx": "preserve",
-    "incremental": true,
-    "plugins": [
-      {
-        "name": "next"
-      }
-    ],
-    "paths": {
-      "@/*": ["./*"]
-    },
-    "forceConsistentCasingInFileNames": true
-  },
-  "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
-  "exclude": ["node_modules"]
-}
diff --git a/packages/create-llama/templates/types/streaming/nextjs/webpack.config.mjs b/packages/create-llama/templates/types/streaming/nextjs/webpack.config.mjs
deleted file mode 100644
index 57fa19cfeccb4c8a416f26250164abe46597ab94..0000000000000000000000000000000000000000
--- a/packages/create-llama/templates/types/streaming/nextjs/webpack.config.mjs
+++ /dev/null
@@ -1,10 +0,0 @@
-// In Next.js, the webpack config must be a function that patches the default webpack config provided by Next.js, see https://nextjs.org/docs/pages/api-reference/next-config-js/webpack
-export default function webpack(config) {
-  // See https://webpack.js.org/configuration/resolve/#resolvealias
-  config.resolve.alias = {
-    ...config.resolve.alias,
-    sharp$: false,
-    "onnxruntime-node$": false,
-  };
-  return config;
-}
diff --git a/packages/create-llama/tsconfig.json b/packages/create-llama/tsconfig.json
deleted file mode 100644
index 5d804d06aeb6649d54b44960aed59b646b84a62e..0000000000000000000000000000000000000000
--- a/packages/create-llama/tsconfig.json
+++ /dev/null
@@ -1,24 +0,0 @@
-{
-  "compilerOptions": {
-    "target": "es2019",
-    "module": "esnext",
-    "moduleResolution": "node",
-    "strict": true,
-    "resolveJsonModule": true,
-    "skipLibCheck": true,
-    "declaration": false,
-    "esModuleInterop": true,
-    "forceConsistentCasingInFileNames": true,
-    "incremental": true,
-    "outDir": "./lib",
-    "tsBuildInfoFile": "./lib/.tsbuildinfo"
-  },
-  "include": [
-    "create-app.ts",
-    "index.ts",
-    "./helpers",
-    "questions.ts",
-    "package.json"
-  ],
-  "exclude": ["dist"]
-}
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 266021e4494a733db50fc6fc7910c2d208ca8fc1..e64988699293cd9e0bc949d8562feaac2c4d151a 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -302,87 +302,6 @@ importers:
         specifier: ^1.3.1
         version: 1.3.1
 
-  packages/create-llama:
-    devDependencies:
-      '@playwright/test':
-        specifier: ^1.41.1
-        version: 1.41.1
-      '@types/async-retry':
-        specifier: 1.4.2
-        version: 1.4.2
-      '@types/ci-info':
-        specifier: 2.0.0
-        version: 2.0.0
-      '@types/cross-spawn':
-        specifier: 6.0.0
-        version: 6.0.0
-      '@types/node':
-        specifier: ^20.11.7
-        version: 20.11.7
-      '@types/prompts':
-        specifier: 2.0.1
-        version: 2.0.1
-      '@types/tar':
-        specifier: 6.1.5
-        version: 6.1.5
-      '@types/validate-npm-package-name':
-        specifier: 3.0.0
-        version: 3.0.0
-      '@vercel/ncc':
-        specifier: 0.38.1
-        version: 0.38.1
-      async-retry:
-        specifier: 1.3.1
-        version: 1.3.1
-      async-sema:
-        specifier: 3.0.1
-        version: 3.0.1
-      ci-info:
-        specifier: github:watson/ci-info#f43f6a1cefff47fb361c88cf4b943fdbcaafe540
-        version: github.com/watson/ci-info/f43f6a1cefff47fb361c88cf4b943fdbcaafe540
-      commander:
-        specifier: 2.20.0
-        version: 2.20.0
-      conf:
-        specifier: 10.2.0
-        version: 10.2.0
-      cross-spawn:
-        specifier: 7.0.3
-        version: 7.0.3
-      fast-glob:
-        specifier: 3.3.1
-        version: 3.3.1
-      got:
-        specifier: 10.7.0
-        version: 10.7.0
-      picocolors:
-        specifier: 1.0.0
-        version: 1.0.0
-      prompts:
-        specifier: 2.1.0
-        version: 2.1.0
-      rimraf:
-        specifier: ^5.0.5
-        version: 5.0.5
-      smol-toml:
-        specifier: ^1.1.4
-        version: 1.1.4
-      tar:
-        specifier: 6.1.15
-        version: 6.1.15
-      terminal-link:
-        specifier: ^3.0.0
-        version: 3.0.0
-      update-check:
-        specifier: 1.5.4
-        version: 1.5.4
-      validate-npm-package-name:
-        specifier: 3.0.0
-        version: 3.0.0
-      wait-port:
-        specifier: ^1.1.0
-        version: 1.1.0
-
   packages/env:
     dependencies:
       '@types/lodash':
@@ -3609,14 +3528,6 @@ packages:
     dev: true
     optional: true
 
-  /@playwright/test@1.41.1:
-    resolution: {integrity: sha512-9g8EWTjiQ9yFBXc6HjCWe41msLpxEX0KhmfmPl9RPLJdfzL4F0lg2BdJ91O9azFdl11y1pmpwdjBiSxvqc+btw==}
-    engines: {node: '>=16'}
-    hasBin: true
-    dependencies:
-      playwright: 1.41.1
-    dev: true
-
   /@pnpm/config.env-replace@1.1.0:
     resolution: {integrity: sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==}
     engines: {node: '>=12.22.0'}
@@ -3828,11 +3739,6 @@ packages:
     resolution: {integrity: sha512-aX5IFYWlMa7tQ8xZr3b2gtVReCvg7f3LEhjir/JAjX2bJCMVJA5tIPv30wTD4KDfcwMd7DDYY3hFDeGmOgtrZQ==}
     dev: false
 
-  /@sindresorhus/is@2.1.1:
-    resolution: {integrity: sha512-/aPsuoj/1Dw/kzhkgz+ES6TxG0zfTMGLwuK2ZG00k/iJzYHTLCE8mVU8EPqEOp/lmxPoq1C1C9RYToRKb2KEfg==}
-    engines: {node: '>=10'}
-    dev: true
-
   /@sindresorhus/is@4.6.0:
     resolution: {integrity: sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==}
     engines: {node: '>=10'}
@@ -4218,12 +4124,6 @@ packages:
     dependencies:
       '@types/estree': 1.0.5
 
-  /@types/async-retry@1.4.2:
-    resolution: {integrity: sha512-GUDuJURF0YiJZ+CBjNQA0+vbP/VHlJbB0sFqkzsV7EcOPRfurVonXpXKAt3w8qIjM1TEzpz6hc6POocPvHOS3w==}
-    dependencies:
-      '@types/retry': 0.12.5
-    dev: true
-
   /@types/body-parser@1.19.5:
     resolution: {integrity: sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==}
     dependencies:
@@ -4244,10 +4144,6 @@ packages:
       '@types/responselike': 1.0.3
     dev: true
 
-  /@types/ci-info@2.0.0:
-    resolution: {integrity: sha512-5R2/MHILQLDCzTuhs1j4Qqq8AaKUf7Ma4KSSkCtc12+fMs47zfa34qhto9goxpyX00tQK1zxB885VCiawZ5Qhg==}
-    dev: true
-
   /@types/connect-history-api-fallback@1.5.4:
     resolution: {integrity: sha512-n6Cr2xS1h4uAulPRdlw6Jl6s1oG8KrVilPN2yUITEs+K48EzMJJ3W1xy8K5eWuFvjp3R74AOIGSmp2UfBJ8HFw==}
     dependencies:
@@ -4259,12 +4155,6 @@ packages:
     dependencies:
       '@types/node': 20.11.20
 
-  /@types/cross-spawn@6.0.0:
-    resolution: {integrity: sha512-evp2ZGsFw9YKprDbg8ySgC9NA15g3YgiI8ANkGmKKvvi0P2aDGYLPxQIC5qfeKNUOe3TjABVGuah6omPRpIYhg==}
-    dependencies:
-      '@types/node': 20.11.20
-    dev: true
-
   /@types/debug@4.1.12:
     resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==}
     dependencies:
@@ -4442,12 +4332,6 @@ packages:
     dependencies:
       undici-types: 5.26.5
 
-  /@types/node@20.11.7:
-    resolution: {integrity: sha512-GPmeN1C3XAyV5uybAf4cMLWT9fDWcmQhZVtMFu7OR32WjrqGG+Wnk2V1d0bmtUyE/Zy1QJ9BxyiTih9z8Oks8A==}
-    dependencies:
-      undici-types: 5.26.5
-    dev: true
-
   /@types/normalize-package-data@2.4.4:
     resolution: {integrity: sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA==}
     dev: true
@@ -4472,10 +4356,6 @@ packages:
   /@types/prismjs@1.26.3:
     resolution: {integrity: sha512-A0D0aTXvjlqJ5ZILMz3rNfDBOx9hHxLZYv2by47Sm/pqW35zzjusrZTryatjN/Rf8Us2gZrJD+KeHbUSTux1Cw==}
 
-  /@types/prompts@2.0.1:
-    resolution: {integrity: sha512-AhtMcmETelF8wFDV1ucbChKhLgsc+ytXZXkNz/nnTAMSDeqsjALknEFxi7ZtLgS/G8bV2rp90LhDW5SGACimIQ==}
-    dev: true
-
   /@types/prop-types@15.7.11:
     resolution: {integrity: sha512-ga8y9v9uyeiLdpKddhxYQkxNDrfvuPrlFb0N1qnZZByvcElJaXthF1UhvCh9TLWJBEHeNtdnbysW7Y6Uq8CVng==}
 
@@ -4542,10 +4422,6 @@ packages:
   /@types/retry@0.12.0:
     resolution: {integrity: sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==}
 
-  /@types/retry@0.12.5:
-    resolution: {integrity: sha512-3xSjTp3v03X/lSQLkczaN9UIEwJMoMCA1+Nb5HfbJEQWogdeQIyVtTvxPXDQjZ5zws8rFQfVfRdz03ARihPJgw==}
-    dev: true
-
   /@types/sax@1.2.7:
     resolution: {integrity: sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==}
     dependencies:
@@ -4585,13 +4461,6 @@ packages:
     dependencies:
       '@types/node': 20.11.20
 
-  /@types/tar@6.1.5:
-    resolution: {integrity: sha512-qm2I/RlZij5RofuY7vohTpYNaYcrSQlN2MyjucQc7ZweDwaEWkdN/EeNh6e9zjK6uEm6PwjdMXkcj05BxZdX1Q==}
-    dependencies:
-      '@types/node': 20.11.20
-      minipass: 4.2.8
-    dev: true
-
   /@types/triple-beam@1.3.5:
     resolution: {integrity: sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw==}
     dev: false
@@ -4602,10 +4471,6 @@ packages:
   /@types/unist@3.0.2:
     resolution: {integrity: sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==}
 
-  /@types/validate-npm-package-name@3.0.0:
-    resolution: {integrity: sha512-iFNNIrEaJH1lbPiyX+O/QyxSbKxrTjdNBVZGckt+iEL9So0hdZNBL68sOfHnt2txuUD8UJXvmKv/1DkgkebgUg==}
-    dev: true
-
   /@types/webidl-conversions@7.0.2:
     resolution: {integrity: sha512-uNv6b/uGRLlCVmelat2rA8bcVd3k/42mV2EmjhPh6JLkd35T5bgwR/t6xy7a9MWhd9sixIeBUzhBenvk3NO+DQ==}
     dev: false
@@ -4764,11 +4629,6 @@ packages:
   /@ungap/structured-clone@1.2.0:
     resolution: {integrity: sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==}
 
-  /@vercel/ncc@0.38.1:
-    resolution: {integrity: sha512-IBBb+iI2NLu4VQn3Vwldyi2QwaXt5+hTyh58ggAMoCGE6DJmPvwL3KPBWcJl1m9LYPChBLE980Jw+CS4Wokqxw==}
-    hasBin: true
-    dev: true
-
   /@vitest/expect@1.3.1:
     resolution: {integrity: sha512-xofQFwIzfdmLLlHa6ag0dPV8YsnKOCP1KdAeVVh34vSjN2dcUiXYCD9htu/9eM7t8Xln4v03U9HLxLpPlsXdZw==}
     dependencies:
@@ -5065,13 +4925,6 @@ packages:
     engines: {node: '>=6'}
     dev: true
 
-  /ansi-escapes@5.0.0:
-    resolution: {integrity: sha512-5GFMVX8HqE/TB+FuBJGuO5XG0WrsA6ptUqoODaT/n9mmUaZFkqnBueB4leqGBCmrUHnCnC4PCZTCd0E7QQ83bA==}
-    engines: {node: '>=12'}
-    dependencies:
-      type-fest: 1.4.0
-    dev: true
-
   /ansi-escapes@6.2.0:
     resolution: {integrity: sha512-kzRaCqXnpzWs+3z5ABPQiVke+iq0KXkHo8xiWV4RPTi5Yli0l97BEQuhXV1s7+aSU/fu1kUuxgS4MsQ0fRuygw==}
     engines: {node: '>=14.16'}
@@ -5295,16 +5148,6 @@ packages:
     resolution: {integrity: sha512-ISvCdHdlTDlH5IpxQJIex7BWBywFWgjJSVdwst+/iQCoEYnyOaQ95+X1JGshuBjGp6nxKUy1jMgE3zPqN7fQdg==}
     hasBin: true
 
-  /async-retry@1.3.1:
-    resolution: {integrity: sha512-aiieFW/7h3hY0Bq5d+ktDBejxuwR78vRu9hDUdR8rNhSaQ29VzPL4AoIRG7D/c7tdenwOcKvgPM6tIxB3cB6HA==}
-    dependencies:
-      retry: 0.12.0
-    dev: true
-
-  /async-sema@3.0.1:
-    resolution: {integrity: sha512-fKT2riE8EHAvJEfLJXZiATQWqZttjx1+tfgnVshCDrH8vlw4YC8aECe0B8MU184g+aVRFVgmfxFlKZKaozSrNw==}
-    dev: true
-
   /async@3.2.5:
     resolution: {integrity: sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==}
     dev: false
@@ -5323,11 +5166,6 @@ packages:
     resolution: {integrity: sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==}
     engines: {node: '>= 4.0.0'}
 
-  /atomically@1.7.0:
-    resolution: {integrity: sha512-Xcz9l0z7y9yQ9rdDaxlmaI4uJHf/T8g9hOEzJcsEqX2SjCj4J20uK7+ldkDHMbpJDK76wF7xEIgxc/vSlsfw5w==}
-    engines: {node: '>=10.12.0'}
-    dev: true
-
   /autoprefixer@10.4.17(postcss@8.4.33):
     resolution: {integrity: sha512-/cpVNRLSfhOtcGflT13P2794gVSgmPgTR+erw5ifnMLZb0UnSlkK4tquLmkd3BhA+nLo5tX8Cu0upUsGKvKbmg==}
     engines: {node: ^10 || ^12 || >=14}
@@ -5614,10 +5452,6 @@ packages:
     dev: false
     optional: true
 
-  /builtins@1.0.3:
-    resolution: {integrity: sha512-uYBjakWipfaO/bXI7E8rq6kpwHRZK5cNYrUv2OzZSI/FvmdMyXJ2tG9dKcjEC5YHmHpUAwsargWIZNWdxb/bnQ==}
-    dev: true
-
   /busboy@1.6.0:
     resolution: {integrity: sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==}
     engines: {node: '>=10.16.0'}
@@ -5638,14 +5472,6 @@ packages:
     engines: {node: '>=8'}
     dev: true
 
-  /cacheable-lookup@2.0.1:
-    resolution: {integrity: sha512-EMMbsiOTcdngM/K6gV/OxF2x0t07+vMOWxZNSCRQMjO2MY2nhZQ6OYhOOpyQrbhqsgtvKGI7hcq6xjnA92USjg==}
-    engines: {node: '>=10'}
-    dependencies:
-      '@types/keyv': 3.1.4
-      keyv: 4.5.4
-    dev: true
-
   /cacheable-lookup@5.0.4:
     resolution: {integrity: sha512-2/kNscPhpcxrOigMZzbiWF7dz8ilhb/nIHU3EyZiXWXpeq/au8qJ8VhdftMkty3n7Gj6HIGalQG8oiBNB3AJgA==}
     engines: {node: '>=10.6.0'}
@@ -5841,11 +5667,6 @@ packages:
     resolution: {integrity: sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==}
     dev: false
 
-  /chownr@2.0.0:
-    resolution: {integrity: sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==}
-    engines: {node: '>=10'}
-    dev: true
-
   /chromadb@1.7.3(cohere-ai@7.7.5)(openai@4.26.1):
     resolution: {integrity: sha512-3GgvQjpqgk5C89x5EuTDaXKbfrdqYDJ5UVyLQ3ZmwxnpetNc+HhRDGjkvXa5KSvpQ3lmKoyDoqnN4tZepfFkbw==}
     engines: {node: '>=14.17.0'}
@@ -6156,22 +5977,6 @@ packages:
       yargs: 17.7.2
     dev: true
 
-  /conf@10.2.0:
-    resolution: {integrity: sha512-8fLl9F04EJqjSqH+QjITQfJF8BrOVaYr1jewVgSRAEWePfxT0sku4w2hrGQ60BC/TNLGQ2pgxNlTbWQmMPFvXg==}
-    engines: {node: '>=12'}
-    dependencies:
-      ajv: 8.12.0
-      ajv-formats: 2.1.1(ajv@8.12.0)
-      atomically: 1.7.0
-      debounce-fn: 4.0.0
-      dot-prop: 6.0.1
-      env-paths: 2.2.1
-      json-schema-typed: 7.0.3
-      onetime: 5.1.2
-      pkg-up: 3.1.0
-      semver: 7.5.4
-    dev: true
-
   /config-chain@1.1.13:
     resolution: {integrity: sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==}
     dependencies:
@@ -6539,13 +6344,6 @@ packages:
     resolution: {integrity: sha512-vjAczensTgRcqDERK0SR2XMwsF/tSvnvlv6VcF2GIhg6Sx4yOIt/irsr1RDJsKiIyBzJDpCoXiWWq28MqH2cnQ==}
     dev: false
 
-  /debounce-fn@4.0.0:
-    resolution: {integrity: sha512-8pYCQiL9Xdcg0UPSD3d+0KMlOjp+KGU5EPwYddgzQ7DATsg4fuUDjQtsYLmWjnk2obnNHgV3vE2Y4jejSOJVBQ==}
-    engines: {node: '>=10'}
-    dependencies:
-      mimic-fn: 3.1.0
-    dev: true
-
   /debounce@1.2.1:
     resolution: {integrity: sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==}
 
@@ -6599,13 +6397,6 @@ packages:
     dependencies:
       character-entities: 2.0.2
 
-  /decompress-response@5.0.0:
-    resolution: {integrity: sha512-TLZWWybuxWgoW7Lykv+gq9xvzOsUjQ9tF09Tj6NSTYGMTCHNXzrPnD6Hi+TgZq19PyTAGH4Ll/NIM/eTGglnMw==}
-    engines: {node: '>=10'}
-    dependencies:
-      mimic-response: 2.1.0
-    dev: true
-
   /decompress-response@6.0.0:
     resolution: {integrity: sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==}
     engines: {node: '>=10'}
@@ -7048,10 +6839,6 @@ packages:
       underscore: 1.13.6
     dev: false
 
-  /duplexer3@0.1.5:
-    resolution: {integrity: sha512-1A8za6ws41LQgv9HrE/66jyC5yuSjQ3L/KOpFtoBilsAK2iA2wuS5rTt1OCzIvtS2V7nVmedsUU+DGRcjBmOYA==}
-    dev: true
-
   /duplexer@0.1.2:
     resolution: {integrity: sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==}
 
@@ -7137,11 +6924,6 @@ packages:
     resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==}
     engines: {node: '>=0.12'}
 
-  /env-paths@2.2.1:
-    resolution: {integrity: sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==}
-    engines: {node: '>=6'}
-    dev: true
-
   /error-ex@1.3.2:
     resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==}
     dependencies:
@@ -8339,13 +8121,6 @@ packages:
       jsonfile: 6.1.0
       universalify: 2.0.1
 
-  /fs-minipass@2.1.0:
-    resolution: {integrity: sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==}
-    engines: {node: '>= 8'}
-    dependencies:
-      minipass: 3.3.6
-    dev: true
-
   /fs-monkey@1.0.5:
     resolution: {integrity: sha512-8uMbBjrhzW76TYgEV27Y5E//W2f/lTFmx78P2w19FZSxarhI/798APGQyuGCwmkNxgwGRhrLfvWyLBvNtuOmew==}
 
@@ -8362,14 +8137,6 @@ packages:
   /fs.realpath@1.0.0:
     resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==}
 
-  /fsevents@2.3.2:
-    resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==}
-    engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
-    os: [darwin]
-    requiresBuild: true
-    dev: true
-    optional: true
-
   /fsevents@2.3.3:
     resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
     engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
@@ -8605,29 +8372,6 @@ packages:
     dependencies:
       get-intrinsic: 1.2.2
 
-  /got@10.7.0:
-    resolution: {integrity: sha512-aWTDeNw9g+XqEZNcTjMMZSy7B7yE9toWOFYip7ofFTLleJhvZwUxxTxkTpKvF+p1SAA4VHmuEy7PiHTHyq8tJg==}
-    engines: {node: '>=10'}
-    dependencies:
-      '@sindresorhus/is': 2.1.1
-      '@szmarczak/http-timer': 4.0.6
-      '@types/cacheable-request': 6.0.3
-      '@types/keyv': 3.1.4
-      '@types/responselike': 1.0.3
-      cacheable-lookup: 2.0.1
-      cacheable-request: 7.0.4
-      decompress-response: 5.0.0
-      duplexer3: 0.1.5
-      get-stream: 5.2.0
-      lowercase-keys: 2.0.0
-      mimic-response: 2.1.0
-      p-cancelable: 2.1.1
-      p-event: 4.2.0
-      responselike: 2.0.1
-      to-readable-stream: 2.1.0
-      type-fest: 0.10.0
-    dev: true
-
   /got@11.8.6:
     resolution: {integrity: sha512-6tfZ91bOr7bOXnK7PRDCGBLa1H4U080YHNaAQ2KsMGlLEzRbk44nsZF2E1IeRc3vtJHPVbKCYgdFbaGO2ljd8g==}
     engines: {node: '>=10.19.0'}
@@ -9664,10 +9408,6 @@ packages:
   /json-schema-traverse@1.0.0:
     resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==}
 
-  /json-schema-typed@7.0.3:
-    resolution: {integrity: sha512-7DE8mpG+/fVw+dTpjbxnx47TaMnDfOI1jwft9g1VybltZCduyRQPJPvc+zzKY9WPHxhPWczyFuYa6I8Mw4iU5A==}
-    dev: true
-
   /json-stable-stringify-without-jsonify@1.0.1:
     resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==}
 
@@ -10773,11 +10513,6 @@ packages:
     resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==}
     engines: {node: '>=6'}
 
-  /mimic-fn@3.1.0:
-    resolution: {integrity: sha512-Ysbi9uYW9hFyfrThdDEQuykN4Ey6BuwPD2kpI5ES/nFTDn/98yxYNLZJcgUAKPT/mcrLLKaGzJR9YVxJrIdASQ==}
-    engines: {node: '>=8'}
-    dev: true
-
   /mimic-fn@4.0.0:
     resolution: {integrity: sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==}
     engines: {node: '>=12'}
@@ -10788,11 +10523,6 @@ packages:
     engines: {node: '>=4'}
     dev: true
 
-  /mimic-response@2.1.0:
-    resolution: {integrity: sha512-wXqjST+SLt7R009ySCglWBCFpjUygmCIfD790/kVbiGmUgfYGuB14PiTd5DwVxSV4NcYHjzMkoj5LjQZwTQLEA==}
-    engines: {node: '>=8'}
-    dev: true
-
   /mimic-response@3.1.0:
     resolution: {integrity: sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==}
     engines: {node: '>=10'}
@@ -10841,31 +10571,11 @@ packages:
   /minimist@1.2.8:
     resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
 
-  /minipass@3.3.6:
-    resolution: {integrity: sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==}
-    engines: {node: '>=8'}
-    dependencies:
-      yallist: 4.0.0
-    dev: true
-
-  /minipass@4.2.8:
-    resolution: {integrity: sha512-fNzuVyifolSLFL4NzpF+wEF4qrgqaaKX0haXPQEdQ7NKAN+WecoKMHV09YcuL/DHxrUsYQOK3MiuDf7Ip2OXfQ==}
-    engines: {node: '>=8'}
-    dev: true
-
   /minipass@5.0.0:
     resolution: {integrity: sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==}
     engines: {node: '>=8'}
     dev: true
 
-  /minizlib@2.1.2:
-    resolution: {integrity: sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==}
-    engines: {node: '>= 8'}
-    dependencies:
-      minipass: 3.3.6
-      yallist: 4.0.0
-    dev: true
-
   /mixme@0.5.10:
     resolution: {integrity: sha512-5H76ANWinB1H3twpJ6JY8uvAtpmFvHNArpilJAjXRKXSDDLPIMoZArw5SH0q9z+lLs8IrMw7Q2VWpWimFKFT1Q==}
     engines: {node: '>= 8.0.0'}
@@ -10875,12 +10585,6 @@ packages:
     resolution: {integrity: sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==}
     dev: false
 
-  /mkdirp@1.0.4:
-    resolution: {integrity: sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==}
-    engines: {node: '>=10'}
-    hasBin: true
-    dev: true
-
   /mlly@1.6.0:
     resolution: {integrity: sha512-YOvg9hfYQmnaB56Yb+KrJE2u0Yzz5zR+sLejEvF4fzwzV1Al6hkf2vyHTwqCRyv0hCi9rVCqVoXpyYevQIRwLQ==}
     dependencies:
@@ -11473,13 +11177,6 @@ packages:
     resolution: {integrity: sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==}
     engines: {node: '>=12.20'}
 
-  /p-event@4.2.0:
-    resolution: {integrity: sha512-KXatOjCRXXkSePPb1Nbi0p0m+gQAwdlbhi4wQKJPI1HsMQS9g+Sqp2o+QHziPr7eYJyOZet836KoHEVM1mwOrQ==}
-    engines: {node: '>=8'}
-    dependencies:
-      p-timeout: 3.2.0
-    dev: true
-
   /p-filter@2.1.0:
     resolution: {integrity: sha512-ZBxxZ5sL2HghephhpGAQdoskxplTwr7ICaehZwLIlfL6acuVgZPm8yBNuRAFBGEqtD/hmUeq9eqLg2ys9Xr/yw==}
     engines: {node: '>=8'}
@@ -11560,13 +11257,6 @@ packages:
       '@types/retry': 0.12.0
       retry: 0.13.1
 
-  /p-timeout@3.2.0:
-    resolution: {integrity: sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==}
-    engines: {node: '>=8'}
-    dependencies:
-      p-finally: 1.0.0
-    dev: true
-
   /p-try@2.2.0:
     resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==}
     engines: {node: '>=6'}
@@ -11890,22 +11580,6 @@ packages:
     resolution: {integrity: sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg==}
     dev: false
 
-  /playwright-core@1.41.1:
-    resolution: {integrity: sha512-/KPO5DzXSMlxSX77wy+HihKGOunh3hqndhqeo/nMxfigiKzogn8kfL0ZBDu0L1RKgan5XHCPmn6zXd2NUJgjhg==}
-    engines: {node: '>=16'}
-    hasBin: true
-    dev: true
-
-  /playwright@1.41.1:
-    resolution: {integrity: sha512-gdZAWG97oUnbBdRL3GuBvX3nDDmUOuqzV/D24dytqlKt+eI5KbwusluZRGljx1YoJKZ2NRPaeWiFTeGZO7SosQ==}
-    engines: {node: '>=16'}
-    hasBin: true
-    dependencies:
-      playwright-core: 1.41.1
-    optionalDependencies:
-      fsevents: 2.3.2
-    dev: true
-
   /pluralize@8.0.0:
     resolution: {integrity: sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==}
     engines: {node: '>=4'}
@@ -12534,14 +12208,6 @@ packages:
     dev: false
     optional: true
 
-  /prompts@2.1.0:
-    resolution: {integrity: sha512-+x5TozgqYdOwWsQFZizE/Tra3fKvAoy037kOyU6cgz84n8f6zxngLOV4O32kTwt9FcLCxAqw0P/c8rOr9y+Gfg==}
-    engines: {node: '>= 6'}
-    dependencies:
-      kleur: 3.0.3
-      sisteransi: 1.0.5
-    dev: true
-
   /prompts@2.4.2:
     resolution: {integrity: sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==}
     engines: {node: '>= 6'}
@@ -13063,26 +12729,12 @@ packages:
       unicode-match-property-ecmascript: 2.0.0
       unicode-match-property-value-ecmascript: 2.1.0
 
-  /registry-auth-token@3.3.2:
-    resolution: {integrity: sha512-JL39c60XlzCVgNrO+qq68FoNb56w/m7JYvGR2jT5iR1xBrUA3Mfx5Twk5rqTThPmQKMWydGmq8oFtDlxfrmxnQ==}
-    dependencies:
-      rc: 1.2.8
-      safe-buffer: 5.2.1
-    dev: true
-
   /registry-auth-token@5.0.2:
     resolution: {integrity: sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ==}
     engines: {node: '>=14'}
     dependencies:
       '@pnpm/npm-conf': 2.2.2
 
-  /registry-url@3.1.0:
-    resolution: {integrity: sha512-ZbgR5aZEdf4UKZVBPYIgaglBmSF2Hi94s2PcIHhRGFjKYu+chjJdYfHn4rt3hB6eCKLJ8giVIIfgMa1ehDfZKA==}
-    engines: {node: '>=0.10.0'}
-    dependencies:
-      rc: 1.2.8
-    dev: true
-
   /registry-url@6.0.1:
     resolution: {integrity: sha512-+crtS5QjFRqFCoQmvGduwYWEBng99ZvmFvF+cUJkGYF1L1BfU8C6Zp9T7f5vPAwyLkUExpvK+ANVZmGU49qi4Q==}
     engines: {node: '>=12'}
@@ -13308,11 +12960,6 @@ packages:
       signal-exit: 3.0.7
     dev: true
 
-  /retry@0.12.0:
-    resolution: {integrity: sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==}
-    engines: {node: '>= 4'}
-    dev: true
-
   /retry@0.13.1:
     resolution: {integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==}
     engines: {node: '>= 4'}
@@ -13330,14 +12977,6 @@ packages:
     dependencies:
       glob: 7.2.3
 
-  /rimraf@5.0.5:
-    resolution: {integrity: sha512-CqDakW+hMe/Bz202FPEymy68P+G50RfMQK+Qo5YUqc9SPipvbGjCGKd0RSKEelbsfQuw3g5NZDSrlZZAJurH1A==}
-    engines: {node: '>=14'}
-    hasBin: true
-    dependencies:
-      glob: 10.3.10
-    dev: true
-
   /rollup@4.12.0:
     resolution: {integrity: sha512-wz66wn4t1OHIJw3+XU7mJJQV/2NAfw5OAk6G6Hoo3zcvz/XOfQ52Vgi+AN4Uxoxi0KBBwk2g8zPrTDA4btSB/Q==}
     engines: {node: '>=18.0.0', npm: '>=8.0.0'}
@@ -13796,11 +13435,6 @@ packages:
       yargs: 15.4.1
     dev: true
 
-  /smol-toml@1.1.4:
-    resolution: {integrity: sha512-Y0OT8HezWsTNeEOSVxDnKOW/AyNXHQ4BwJNbAXlLTF5wWsBvrcHhIkE5Rf8kQMLmgf7nDX3PVOlgC6/Aiggu3Q==}
-    engines: {node: '>= 18', pnpm: '>= 8'}
-    dev: true
-
   /sockjs@0.3.24:
     resolution: {integrity: sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==}
     dependencies:
@@ -14268,14 +13902,6 @@ packages:
     dependencies:
       has-flag: 4.0.0
 
-  /supports-hyperlinks@2.3.0:
-    resolution: {integrity: sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA==}
-    engines: {node: '>=8'}
-    dependencies:
-      has-flag: 4.0.0
-      supports-color: 7.2.0
-    dev: true
-
   /supports-preserve-symlinks-flag@1.0.0:
     resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==}
     engines: {node: '>= 0.4'}
@@ -14340,31 +13966,11 @@ packages:
       streamx: 2.15.6
     dev: false
 
-  /tar@6.1.15:
-    resolution: {integrity: sha512-/zKt9UyngnxIT/EAGYuxaMYgOIJiP81ab9ZfkILq4oNLPFX50qyYmu7jRj9qeXoxmJHjGlbH0+cm2uy1WCs10A==}
-    engines: {node: '>=10'}
-    dependencies:
-      chownr: 2.0.0
-      fs-minipass: 2.1.0
-      minipass: 5.0.0
-      minizlib: 2.1.2
-      mkdirp: 1.0.4
-      yallist: 4.0.0
-    dev: true
-
   /term-size@2.2.1:
     resolution: {integrity: sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==}
     engines: {node: '>=8'}
     dev: true
 
-  /terminal-link@3.0.0:
-    resolution: {integrity: sha512-flFL3m4wuixmf6IfhFJd1YPiLiMuxEc8uHRM1buzIeZPm22Au2pDqBJQgdo7n1WfPU1ONFGv7YDwpFBmHGF6lg==}
-    engines: {node: '>=12'}
-    dependencies:
-      ansi-escapes: 5.0.0
-      supports-hyperlinks: 2.3.0
-    dev: true
-
   /terser-webpack-plugin@5.3.10(webpack@5.90.0):
     resolution: {integrity: sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==}
     engines: {node: '>= 10.13.0'}
@@ -14476,11 +14082,6 @@ packages:
     resolution: {integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==}
     engines: {node: '>=4'}
 
-  /to-readable-stream@2.1.0:
-    resolution: {integrity: sha512-o3Qa6DGg1CEXshSdvWNX2sN4QHqg03SPq7U6jPXRahlQdl5dK8oXjkU/2/sGrnOZKeGV1zLSO8qPwyKklPPE7w==}
-    engines: {node: '>=8'}
-    dev: true
-
   /to-regex-range@5.0.1:
     resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==}
     engines: {node: '>=8.0'}
@@ -14760,11 +14361,6 @@ packages:
     engines: {node: '>=4'}
     dev: true
 
-  /type-fest@0.10.0:
-    resolution: {integrity: sha512-EUV9jo4sffrwlg8s0zDhP0T2WD3pru5Xi0+HTE3zTUmBaZNhfkite9PdSJwdXLwPVW0jnAHT56pZHIOYckPEiw==}
-    engines: {node: '>=8'}
-    dev: true
-
   /type-fest@0.13.1:
     resolution: {integrity: sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg==}
     engines: {node: '>=10'}
@@ -15038,13 +14634,6 @@ packages:
       picocolors: 1.0.0
     dev: false
 
-  /update-check@1.5.4:
-    resolution: {integrity: sha512-5YHsflzHP4t1G+8WGPlvKbJEbAJGCgw+Em+dGR1KmBUbr1J36SJBqlHLjR7oob7sco5hWHGQVcr9B2poIVDDTQ==}
-    dependencies:
-      registry-auth-token: 3.3.2
-      registry-url: 3.1.0
-    dev: true
-
   /update-notifier@6.0.2:
     resolution: {integrity: sha512-EDxhTEVPZZRLWYcJ4ZXjGFN0oP7qYvbXWzEgRm/Yql4dHX5wDbvh89YHP6PK1lzZJYrMtXUuZZz8XGK+U6U1og==}
     engines: {node: '>=14.16'}
@@ -15118,12 +14707,6 @@ packages:
       spdx-expression-parse: 3.0.1
     dev: true
 
-  /validate-npm-package-name@3.0.0:
-    resolution: {integrity: sha512-M6w37eVCMMouJ9V/sdPGnC5H4uDr73/+xdq0FBLO3TFFX1+7wiUY6Es328NN+y43tmY+doUdN9g9J21vqB7iLw==}
-    dependencies:
-      builtins: 1.0.3
-    dev: true
-
   /value-equal@1.0.1:
     resolution: {integrity: sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==}
 
@@ -15269,18 +14852,6 @@ packages:
     resolution: {integrity: sha512-AFbieoL7a5LMqcnOF04ji+rpXadgOXnZsxQr//r83kLPr7biP7am3g9zbaZIaBGwBRWeSvoMD4mgPdX3e4NWBg==}
     dev: true
 
-  /wait-port@1.1.0:
-    resolution: {integrity: sha512-3e04qkoN3LxTMLakdqeWth8nih8usyg+sf1Bgdf9wwUkp05iuK1eSY/QpLvscT/+F/gA89+LpUmmgBtesbqI2Q==}
-    engines: {node: '>=10'}
-    hasBin: true
-    dependencies:
-      chalk: 4.1.2
-      commander: 9.5.0
-      debug: 4.3.4
-    transitivePeerDependencies:
-      - supports-color
-    dev: true
-
   /walkdir@0.4.1:
     resolution: {integrity: sha512-3eBwRyEln6E1MSzcxcVpQIhRG8Q1jLvEqRmCZqS3dsfXEDR/AhOF4d+jHg1qvDCpYaVRZjENPQyrVxAkQqxPgQ==}
     engines: {node: '>=6.0.0'}
@@ -15857,9 +15428,3 @@ packages:
 
   /zwitch@2.0.4:
     resolution: {integrity: sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==}
-
-  github.com/watson/ci-info/f43f6a1cefff47fb361c88cf4b943fdbcaafe540:
-    resolution: {tarball: https://codeload.github.com/watson/ci-info/tar.gz/f43f6a1cefff47fb361c88cf4b943fdbcaafe540}
-    name: ci-info
-    version: 2.0.0
-    dev: true
diff --git a/tsconfig.json b/tsconfig.json
index d95d2781d5aca415be87cebda1290a818ce9c76f..029b13bdab084f3591109da88dbd044f98139f6c 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -35,12 +35,6 @@
     {
       "path": "./packages/env/tsconfig.json"
     },
-    {
-      "path": "./packages/create-llama"
-    },
-    {
-      "path": "./packages/create-llama/e2e"
-    },
     {
       "path": "./examples"
     },