diff --git a/.changeset/great-spies-cover.md b/.changeset/great-spies-cover.md
new file mode 100644
index 0000000000000000000000000000000000000000..8e4ea88bd1c83251e306a79f89cbd482b4a5d33e
--- /dev/null
+++ b/.changeset/great-spies-cover.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Add e2e testing for the LlamaCloud data source
diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index 7de9ade7d6d174ba8fd6c9922efc0c86b45ace50..c76cded884cdbef13e25172c36aa8ba2f7b8dbca 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -62,6 +62,7 @@ jobs:
         run: pnpm run e2e
         env:
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+          LLAMA_CLOUD_API_KEY: ${{ secrets.LLAMA_CLOUD_API_KEY }}
         working-directory: .
 
       - uses: actions/upload-artifact@v3
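For context, the step above only forwards the new `LLAMA_CLOUD_API_KEY` secret; the hard failure for a missing key lives in `e2e/utils.ts` below. For local runs, a preflight along these lines (a hypothetical helper, not part of this PR) fails fast before Playwright starts:

```ts
// preflight.ts — hypothetical local check; CI gets these values from
// repository secrets instead.
const required = ["OPENAI_API_KEY", "LLAMA_CLOUD_API_KEY"];

const missing = required.filter((name) => !process.env[name]);
if (missing.length > 0) {
  console.error(
    `Missing env vars: ${missing.join(", ")}. Export them before running 'pnpm run e2e'.`,
  );
  process.exit(1);
}
```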
diff --git a/e2e/basic.spec.ts b/e2e/basic.spec.ts
index 454e552abac74b90698b18134ed4907e7e0bfa86..57e3d01e883e275accdfcd83ea4224766eab07be 100644
--- a/e2e/basic.spec.ts
+++ b/e2e/basic.spec.ts
@@ -17,13 +17,16 @@ const templateFrameworks: TemplateFramework[] = [
   "express",
   "fastapi",
 ];
-const dataSources: string[] = ["--no-files", "--example-file"];
+const dataSources: string[] = ["--no-files", "--llamacloud"];
 const templateUIs: TemplateUI[] = ["shadcn", "html"];
 const templatePostInstallActions: TemplatePostInstallAction[] = [
   "none",
   "runApp",
 ];
 
+const llamaCloudProjectName = "create-llama";
+const llamaCloudIndexName = "e2e-test";
+
 for (const templateType of templateTypes) {
   for (const templateFramework of templateFrameworks) {
     for (const dataSource of dataSources) {
@@ -31,6 +34,11 @@ for (const templateType of templateTypes) {
         for (const templatePostInstallAction of templatePostInstallActions) {
           const appType: AppType =
             templateFramework === "nextjs" ? "" : "--frontend";
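+          // data-backed runs ask about the example document; "--no-files" just greets the app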
+          const userMessage =
+            dataSource !== "--no-files"
+              ? "Physical standard for letters"
+              : "Hello";
           test.describe(`try create-llama ${templateType} ${templateFramework} ${dataSource} ${templateUI} ${appType} ${templatePostInstallAction}`, async () => {
             let port: number;
             let externalPort: number;
@@ -55,6 +62,8 @@ for (const templateType of templateTypes) {
                 port,
                 externalPort,
                 templatePostInstallAction,
+                llamaCloudProjectName,
+                llamaCloudIndexName,
               );
               name = result.projectName;
               appProcess = result.appProcess;
@@ -75,7 +84,7 @@ for (const templateType of templateTypes) {
             }) => {
               test.skip(templatePostInstallAction !== "runApp");
               await page.goto(`http://localhost:${port}`);
-              await page.fill("form input", "hello");
+              await page.fill("form input", userMessage);
               const [response] = await Promise.all([
                 page.waitForResponse(
                   (res) => {
@@ -106,7 +115,7 @@ for (const templateType of templateTypes) {
                     messages: [
                       {
                         role: "user",
-                        content: "Hello",
+                        content: userMessage,
                       },
                     ],
                   },
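The net effect in the spec: each generated app is queried with a message it can actually answer. A minimal sketch of the selection rule, using only values visible in this diff:

```ts
// Sketch of the message-selection rule added above.
type DataSource = "--no-files" | "--llamacloud";

function userMessageFor(dataSource: DataSource): string {
  return dataSource !== "--no-files"
    ? "Physical standard for letters" // answerable from the indexed example file
    : "Hello"; // no documents, so a generic greeting suffices
}
```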
diff --git a/e2e/utils.ts b/e2e/utils.ts
index 7001403c76c272f6e867936ba548093b388ef88a..bacfbdccb8b2b6f699afaac4be13bd134401df56 100644
--- a/e2e/utils.ts
+++ b/e2e/utils.ts
@@ -72,9 +72,13 @@ export async function runCreateLlama(
   port: number,
   externalPort: number,
   postInstallAction: TemplatePostInstallAction,
+  llamaCloudProjectName: string,
+  llamaCloudIndexName: string,
 ): Promise<CreateLlamaResult> {
-  if (!process.env.OPENAI_API_KEY) {
-    throw new Error("Setting OPENAI_API_KEY is mandatory to run tests");
+  if (!process.env.OPENAI_API_KEY || !process.env.LLAMA_CLOUD_API_KEY) {
+    throw new Error(
+      "Setting the OPENAI_API_KEY and LLAMA_CLOUD_API_KEY is mandatory to run tests",
+    );
   }
   const name = [
     templateType,
@@ -110,12 +114,16 @@ export async function runCreateLlama(
     "--no-llama-parse",
     "--observability",
     "none",
+    "--llama-cloud-key",
+    process.env.LLAMA_CLOUD_API_KEY,
   ].join(" ");
   console.log(`running command '${command}' in ${cwd}`);
   const appProcess = exec(command, {
     cwd,
     env: {
       ...process.env,
+      LLAMA_CLOUD_PROJECT_NAME: llamaCloudProjectName,
+      LLAMA_CLOUD_INDEX_NAME: llamaCloudIndexName,
     },
   });
   appProcess.stderr?.on("data", (data) => {
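The two variables injected into the child process are what the generated backend reads to select its LlamaCloud index. Roughly, and hedged (this sketch assumes the `LlamaCloudIndex` options shape from the llamaindex TS package; the generated templates may wire it differently):

```ts
import { LlamaCloudIndex } from "llamaindex";

// How a generated app would plausibly consume the injected env vars.
const index = new LlamaCloudIndex({
  name: process.env.LLAMA_CLOUD_INDEX_NAME!, // "e2e-test" in these tests
  projectName: process.env.LLAMA_CLOUD_PROJECT_NAME!, // "create-llama"
  apiKey: process.env.LLAMA_CLOUD_API_KEY,
});
```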
diff --git a/helpers/index.ts b/helpers/index.ts
index b085c15c340ed11eca81a33cc74ec080f3bc11f1..53e302ab5d6f005d68cefa6314d685e03818b802 100644
--- a/helpers/index.ts
+++ b/helpers/index.ts
@@ -23,6 +23,31 @@ import {
 } from "./types";
 import { installTSTemplate } from "./typescript";
 
+const checkForGenerateScript = (
+  modelConfig: ModelConfig,
+  vectorDb?: TemplateVectorDB,
+  llamaCloudKey?: string,
+  useLlamaParse?: boolean,
+) => {
+  const missingSettings = [];
+
+  if (!modelConfig.isConfigured()) {
+    missingSettings.push("your model provider API key");
+  }
+
+  const llamaCloudApiKey = llamaCloudKey ?? process.env["LLAMA_CLOUD_API_KEY"];
+  const isLlamaCloudKeyRequired = useLlamaParse || vectorDb === "llamacloud";
+  if (isLlamaCloudKeyRequired && !llamaCloudApiKey) {
+    missingSettings.push("your LLAMA_CLOUD_API_KEY");
+  }
+
+  if (vectorDb && vectorDb !== "none" && vectorDb !== "llamacloud") {
+    missingSettings.push("your Vector DB environment variables");
+  }
+
+  return missingSettings;
+};
+
 // eslint-disable-next-line max-params
 async function generateContextData(
   framework: TemplateFramework,
@@ -38,12 +63,15 @@ async function generateContextData(
         ? "poetry run generate"
         : `${packageManager} run generate`,
     )}`;
-    const modelConfigured = modelConfig.isConfigured();
-    const llamaCloudKeyConfigured = useLlamaParse
-      ? llamaCloudKey || process.env["LLAMA_CLOUD_API_KEY"]
-      : true;
-    const hasVectorDb = vectorDb && vectorDb !== "none";
-    if (modelConfigured && llamaCloudKeyConfigured && !hasVectorDb) {
+
+    const missingSettings = checkForGenerateScript(
+      modelConfig,
+      vectorDb,
+      llamaCloudKey,
+      useLlamaParse,
+    );
+
+    if (!missingSettings.length) {
       // If all the required environment variables are set, run the generate script
       if (framework === "fastapi") {
         if (isHavingPoetryLockFile()) {
@@ -63,15 +91,10 @@
       }
     }
 
-    // generate the message of what to do to run the generate script manually
-    const settings = [];
-    if (!modelConfigured) settings.push("your model provider API key");
-    if (!llamaCloudKeyConfigured) settings.push("your Llama Cloud key");
-    if (hasVectorDb) settings.push("your Vector DB environment variables");
-    const settingsMessage =
-      settings.length > 0 ? `After setting ${settings.join(" and ")}, ` : "";
-    const generateMessage = `run ${runGenerate} to generate the context data.`;
-    console.log(`\n${settingsMessage}${generateMessage}\n\n`);
+    const settingsMessage = missingSettings.length
+      ? `After setting ${missingSettings.join(" and ")}, run ${runGenerate} to generate the context data.`
+      : `Run ${runGenerate} to generate the context data.`;
+    console.log(`\n${settingsMessage}\n\n`);
   }
 }
 
diff --git a/index.ts b/index.ts
index c098295aa694ef808075d9f241af95c322550082..5c6059798c64099607d96314d3354fb58e7b3f43 100644
--- a/index.ts
+++ b/index.ts
@@ -9,7 +9,7 @@ import prompts from "prompts";
 import terminalLink from "terminal-link";
 import checkForUpdate from "update-check";
 import { createApp } from "./create-app";
-import { getDataSources } from "./helpers/datasources";
+import { EXAMPLE_FILE, getDataSources } from "./helpers/datasources";
 import { getPkgManager } from "./helpers/get-pkg-manager";
 import { isFolderEmpty } from "./helpers/is-folder-empty";
 import { initializeGlobalAgent } from "./helpers/proxy";
@@ -194,8 +194,16 @@ if (process.argv.includes("--no-llama-parse")) {
 program.askModels = process.argv.includes("--ask-models");
 if (process.argv.includes("--no-files")) {
   program.dataSources = [];
-} else {
+} else if (process.argv.includes("--example-file")) {
   program.dataSources = getDataSources(program.files, program.exampleFile);
+} else if (process.argv.includes("--llamacloud")) {
+  program.dataSources = [
+    {
+      type: "llamacloud",
+      config: {},
+    },
+    EXAMPLE_FILE,
+  ];
 }
 
 const packageManager = !!program.useNpm
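Summarized, the flag dispatch above behaves like this standalone sketch (`resolveDataSources` is not a real helper in this codebase; parameter types are narrowed for illustration):

```ts
import { EXAMPLE_FILE, getDataSources } from "./helpers/datasources";

function resolveDataSources(argv: string[], files?: string, exampleFile?: boolean) {
  if (argv.includes("--no-files")) return []; // explicitly no data sources
  if (argv.includes("--example-file")) return getDataSources(files, exampleFile);
  if (argv.includes("--llamacloud"))
    return [{ type: "llamacloud", config: {} }, EXAMPLE_FILE];
  return undefined; // no flag: the interactive questions decide later
}
```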
diff --git a/questions.ts b/questions.ts
index 4aa6b40ff64dc0699c8dd167d285558cda18e3d9..429a29f361ff3c76e18e36e45f4d363b46a26dc3 100644
--- a/questions.ts
+++ b/questions.ts
@@ -671,21 +671,24 @@ export const askQuestions = async (
 
   // Ask for LlamaCloud API key when using a LlamaCloud index or LlamaParse
   if (isUsingLlamaCloud || program.useLlamaParse) {
-    if (ciInfo.isCI) {
-      program.llamaCloudKey = getPrefOrDefault("llamaCloudKey");
-    } else {
-      // Ask for LlamaCloud API key
-      const { llamaCloudKey } = await prompts(
-        {
-          type: "text",
-          name: "llamaCloudKey",
-          message:
-            "Please provide your LlamaCloud API key (leave blank to skip):",
-        },
-        questionHandlers,
-      );
-      program.llamaCloudKey = preferences.llamaCloudKey =
-        llamaCloudKey || process.env.LLAMA_CLOUD_API_KEY;
+    // skip the prompt if the key was already set, e.g. via --llama-cloud-key
+    if (!program.llamaCloudKey) {
+      if (ciInfo.isCI) {
+        program.llamaCloudKey = getPrefOrDefault("llamaCloudKey");
+      } else {
+        // Ask for LlamaCloud API key
+        const { llamaCloudKey } = await prompts(
+          {
+            type: "text",
+            name: "llamaCloudKey",
+            message:
+              "Please provide your LlamaCloud API key (leave blank to skip):",
+          },
+          questionHandlers,
+        );
+        program.llamaCloudKey = preferences.llamaCloudKey =
+          llamaCloudKey || process.env.LLAMA_CLOUD_API_KEY;
+      }
     }
   }
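The new guard means an explicit `--llama-cloud-key` (which the e2e runner now passes) short-circuits both the CI preference lookup and the interactive prompt. The effective resolution order, as a sketch (`promptAnswer` stands in for the prompts result):

```ts
const resolvedKey =
  program.llamaCloudKey ?? // 1. --llama-cloud-key flag, parsed before this block
  (ciInfo.isCI
    ? getPrefOrDefault("llamaCloudKey") // 2. CI preference/default
    : promptAnswer || process.env.LLAMA_CLOUD_API_KEY); // 3. prompt, then 4. env var
```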