diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index c76cded884cdbef13e25172c36aa8ba2f7b8dbca..332b025bbed81448271e5c2bc0f1388fd001d546 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -18,6 +18,8 @@ jobs:
         node-version: [18, 20]
         python-version: ["3.11"]
         os: [macos-latest, windows-latest, ubuntu-22.04]
+        frameworks: ["nextjs", "express", "fastapi"]
+        datasources: ["--no-files", "--example-file"]
     defaults:
       run:
         shell: bash
@@ -63,6 +65,8 @@ jobs:
         env:
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
           LLAMA_CLOUD_API_KEY: ${{ secrets.LLAMA_CLOUD_API_KEY }}
+          FRAMEWORK: ${{ matrix.frameworks }}
+          DATASOURCE: ${{ matrix.datasources }}
         working-directory: .
 
       - uses: actions/upload-artifact@v3
diff --git a/e2e/basic.spec.ts b/e2e/basic.spec.ts
index 76d518fe0503a3bb4cc949e8974813f30edef21a..7f26192ef3f2a07299a1a8ead3b2f93a2d8102db 100644
--- a/e2e/basic.spec.ts
+++ b/e2e/basic.spec.ts
@@ -11,128 +11,110 @@ import type {
 } from "../helpers";
 import { createTestDir, runCreateLlama, type AppType } from "./utils";
 
-const templateTypes: TemplateType[] = ["streaming"];
-const templateFrameworks: TemplateFramework[] = [
-  "nextjs",
-  "express",
-  "fastapi",
-];
-const dataSources: string[] = ["--no-files", "--example-file"];
-const templateUIs: TemplateUI[] = ["shadcn"];
-const templatePostInstallActions: TemplatePostInstallAction[] = [
-  "none",
-  "runApp",
-];
+const templateType: TemplateType = "streaming";
+const templateFramework: TemplateFramework = process.env.FRAMEWORK
+  ? (process.env.FRAMEWORK as TemplateFramework)
+  : "fastapi";
+const dataSource: string = process.env.DATASOURCE
+  ? process.env.DATASOURCE
+  : "--example-file";
+const templateUI: TemplateUI = "shadcn";
+const templatePostInstallAction: TemplatePostInstallAction = "runApp";
 
 const llamaCloudProjectName = "create-llama";
 const llamaCloudIndexName = "e2e-test";
 
-for (const templateType of templateTypes) {
-  for (const templateFramework of templateFrameworks) {
-    for (const dataSource of dataSources) {
-      for (const templateUI of templateUIs) {
-        for (const templatePostInstallAction of templatePostInstallActions) {
-          const appType: AppType =
-            templateFramework === "nextjs" ? "" : "--frontend";
-          const userMessage =
-            dataSource !== "--no-files"
-              ? "Physical standard for letters"
-              : "Hello";
-          test.describe(`try create-llama ${templateType} ${templateFramework} ${dataSource} ${templateUI} ${appType} ${templatePostInstallAction}`, async () => {
-            let port: number;
-            let externalPort: number;
-            let cwd: string;
-            let name: string;
-            let appProcess: ChildProcess;
-            // Only test without using vector db for now
-            const vectorDb = "none";
+const appType: AppType = templateFramework === "nextjs" ? "" : "--frontend";
+const userMessage =
+  dataSource !== "--no-files" ? "Physical standard for letters" : "Hello";
+test.describe(`try create-llama ${templateType} ${templateFramework} ${dataSource} ${templateUI} ${appType} ${templatePostInstallAction}`, () => {
+  let port: number;
+  let externalPort: number;
+  let cwd: string;
+  let name: string;
+  let appProcess: ChildProcess;
+  // Only test without using vector db for now
+  const vectorDb = "none";
 
-            test.beforeAll(async () => {
-              port = Math.floor(Math.random() * 10000) + 10000;
-              externalPort = port + 1;
-              cwd = await createTestDir();
-              const result = await runCreateLlama(
-                cwd,
-                templateType,
-                templateFramework,
-                dataSource,
-                templateUI,
-                vectorDb,
-                appType,
-                port,
-                externalPort,
-                templatePostInstallAction,
-                llamaCloudProjectName,
-                llamaCloudIndexName,
-              );
-              name = result.projectName;
-              appProcess = result.appProcess;
-            });
+  test.beforeAll(async () => {
+    port = Math.floor(Math.random() * 10000) + 10000;
+    externalPort = port + 1;
+    cwd = await createTestDir();
+    const result = await runCreateLlama(
+      cwd,
+      templateType,
+      templateFramework,
+      dataSource,
+      templateUI,
+      vectorDb,
+      appType,
+      port,
+      externalPort,
+      templatePostInstallAction,
+      llamaCloudProjectName,
+      llamaCloudIndexName,
+    );
+    name = result.projectName;
+    appProcess = result.appProcess;
+  });
 
-            test("App folder should exist", async () => {
-              const dirExists = fs.existsSync(path.join(cwd, name));
-              expect(dirExists).toBeTruthy();
-            });
-            test("Frontend should have a title", async ({ page }) => {
-              test.skip(templatePostInstallAction !== "runApp");
-              await page.goto(`http://localhost:${port}`);
-              await expect(page.getByText("Built by LlamaIndex")).toBeVisible();
-            });
+  test("App folder should exist", async () => {
+    const dirExists = fs.existsSync(path.join(cwd, name));
+    expect(dirExists).toBeTruthy();
+  });
+  test("Frontend should have a title", async ({ page }) => {
+    test.skip(templatePostInstallAction !== "runApp");
+    await page.goto(`http://localhost:${port}`);
+    await expect(page.getByText("Built by LlamaIndex")).toBeVisible();
+  });
 
-            test("Frontend should be able to submit a message and receive a response", async ({
-              page,
-            }) => {
-              test.skip(templatePostInstallAction !== "runApp");
-              await page.goto(`http://localhost:${port}`);
-              await page.fill("form input", userMessage);
-              const [response] = await Promise.all([
-                page.waitForResponse(
-                  (res) => {
-                    return (
-                      res.url().includes("/api/chat") && res.status() === 200
-                    );
-                  },
-                  {
-                    timeout: 1000 * 60,
-                  },
-                ),
-                page.click("form button[type=submit]"),
-              ]);
-              const text = await response.text();
-              console.log("AI response when submitting message: ", text);
-              expect(response.ok()).toBeTruthy();
-            });
+  test("Frontend should be able to submit a message and receive a response", async ({
+    page,
+  }) => {
+    test.skip(templatePostInstallAction !== "runApp");
+    await page.goto(`http://localhost:${port}`);
+    await page.fill("form input", userMessage);
+    const [response] = await Promise.all([
+      page.waitForResponse(
+        (res) => {
+          return res.url().includes("/api/chat") && res.status() === 200;
+        },
+        {
+          timeout: 1000 * 60,
+        },
+      ),
+      page.click("form button[type=submit]"),
+    ]);
+    const text = await response.text();
+    console.log("AI response when submitting message: ", text);
+    expect(response.ok()).toBeTruthy();
+  });
 
-            test("Backend frameworks should response when calling non-streaming chat API", async ({
-              request,
-            }) => {
-              test.skip(templatePostInstallAction !== "runApp");
-              test.skip(templateFramework === "nextjs");
-              const response = await request.post(
-                `http://localhost:${externalPort}/api/chat/request`,
-                {
-                  data: {
-                    messages: [
-                      {
-                        role: "user",
-                        content: userMessage,
-                      },
-                    ],
-                  },
-                },
-              );
-              const text = await response.text();
-              console.log("AI response when calling API: ", text);
-              expect(response.ok()).toBeTruthy();
-            });
+  test("Backend frameworks should respond when calling non-streaming chat API", async ({
+    request,
+  }) => {
+    test.skip(templatePostInstallAction !== "runApp");
+    test.skip(templateFramework === "nextjs");
+    const response = await request.post(
+      `http://localhost:${externalPort}/api/chat/request`,
+      {
+        data: {
+          messages: [
+            {
+              role: "user",
+              content: userMessage,
+            },
+          ],
+        },
+      },
+    );
+    const text = await response.text();
+    console.log("AI response when calling API: ", text);
+    expect(response.ok()).toBeTruthy();
+  });
 
-            // clean processes
-            test.afterAll(async () => {
-              appProcess?.kill();
-            });
-          });
-        }
-      }
-    }
-  }
-}
+  // clean processes
+  test.afterAll(async () => {
+    appProcess?.kill();
+  });
+});