diff --git a/.eslintrc b/.eslintrc
index a50d5a3c8bb4185cd761728512bd383c955d3aa7..fc6949f087a7257ff69649dcc594a22a49b9dd8c 100644
--- a/.eslintrc
+++ b/.eslintrc
@@ -6,6 +6,7 @@
       {
         "allowList": [
           "OPENAI_API_KEY",
+          "LLAMA_CLOUD_API_KEY",
           "npm_config_user_agent",
           "http_proxy",
           "https_proxy",
@@ -16,4 +17,4 @@
       }
     ]
   }
-}
\ No newline at end of file
+}
diff --git a/helpers/index.ts b/helpers/index.ts
index cb5e437228cb083ab6c3180f9192a4230e1e5b5e..b81bb59d222e1acaa22ef91b2a1bf59fa2a1b0c8 100644
--- a/helpers/index.ts
+++ b/helpers/index.ts
@@ -52,8 +52,15 @@ const createEnvLocalFile = async (
     content += `EMBEDDING_MODEL=${opts?.embeddingModel}\n`;
   }
 
-  if (opts?.llamaCloudKey) {
-    content += `LLAMA_CLOUD_API_KEY=${opts?.llamaCloudKey}\n`;
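+  // LlamaParse requires a LlamaCloud API key; write a commented placeholder if none was provided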
+  if ((opts?.dataSource?.config as FileSourceConfig)?.useLlamaParse) {
+    if (opts?.llamaCloudKey) {
+      content += `LLAMA_CLOUD_API_KEY=${opts?.llamaCloudKey}\n`;
+    } else {
+      content += `# Please obtain the Llama Cloud API key from https://cloud.llamaindex.ai/api-key
+# and set it to the LLAMA_CLOUD_API_KEY variable below.
+# LLAMA_CLOUD_API_KEY=\n`;
+    }
   }
 
   switch (opts?.vectorDb) {
@@ -95,22 +102,35 @@
   }
 };
 
-const generateContextData = async (
+// eslint-disable-next-line max-params
+async function generateContextData(
   framework: TemplateFramework,
   packageManager?: PackageManager,
   openAiKey?: string,
   vectorDb?: TemplateVectorDB,
-) => {
+  dataSource?: TemplateDataSource,
+  llamaCloudKey?: string,
+) {
   if (packageManager) {
     const runGenerate = `${cyan(
       framework === "fastapi"
         ? "poetry run python app/engine/generate.py"
         : `${packageManager} run generate`,
     )}`;
-    const hasOpenAiKey = openAiKey || process.env["OPENAI_API_KEY"];
+    const openAiKeyConfigured = openAiKey || process.env["OPENAI_API_KEY"];
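+    // If using LlamaParse, require a LlamaCloud API key; otherwise treat it as configured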
+    const llamaCloudKeyConfigured = (dataSource?.config as FileSourceConfig)
+      ?.useLlamaParse
+      ? llamaCloudKey || process.env["LLAMA_CLOUD_API_KEY"]
+      : true;
     const hasVectorDb = vectorDb && vectorDb !== "none";
     if (framework === "fastapi") {
-      if (hasOpenAiKey && !hasVectorDb && isHavingPoetryLockFile()) {
+      if (
+        openAiKeyConfigured &&
+        llamaCloudKeyConfigured &&
+        !hasVectorDb &&
+        isHavingPoetryLockFile()
+      ) {
         console.log(`Running ${runGenerate} to generate the context data.`);
         const result = tryPoetryRun("python app/engine/generate.py");
         if (!result) {
@@ -121,7 +141,7 @@ const generateContextData = async (
         return;
       }
     } else {
-      if (hasOpenAiKey && vectorDb === "none") {
+      if (openAiKeyConfigured && vectorDb === "none") {
         console.log(`Running ${runGenerate} to generate the context data.`);
         await callPackageManager(packageManager, true, ["run", "generate"]);
         return;
@@ -129,14 +149,15 @@ const generateContextData = async (
     }
 
     const settings = [];
-    if (!hasOpenAiKey) settings.push("your OpenAI key");
+    if (!openAiKeyConfigured) settings.push("your OpenAI key");
+    if (!llamaCloudKeyConfigured) settings.push("your Llama Cloud API key");
     if (hasVectorDb) settings.push("your Vector DB environment variables");
     const settingsMessage =
       settings.length > 0 ? `After setting ${settings.join(" and ")}, ` : "";
     const generateMessage = `run ${runGenerate} to generate the context data.`;
     console.log(`\n${settingsMessage}${generateMessage}\n\n`);
   }
-};
+}
 
 const copyContextData = async (
   root: string,
@@ -234,6 +255,8 @@ export const installTemplate = async (
           props.packageManager,
           props.openAiKey,
           props.vectorDb,
+          props.dataSource,
+          props.llamaCloudKey,
         );
       }
     }
diff --git a/questions.ts b/questions.ts
index 194c3cce37ccb5bebfdabf9ab1d725e0ce60ead4..6835bb36357cc285e0b164249515f739d1df41ef 100644
--- a/questions.ts
+++ b/questions.ts
@@ -221,12 +221,20 @@ export const askQuestions = async (
           },
         ];
 
-        const hasOpenAiKey = program.openAiKey || process.env["OPENAI_API_KEY"];
+        const openAiKeyConfigured =
+          program.openAiKey || process.env["OPENAI_API_KEY"];
+        // If using LlamaParse, require a LlamaCloud API key; otherwise treat it as configured
+        const llamaCloudKeyConfigured = (
+          program.dataSource?.config as FileSourceConfig
+        )?.useLlamaParse
+          ? program.llamaCloudKey || process.env["LLAMA_CLOUD_API_KEY"]
+          : true;
         const hasVectorDb = program.vectorDb && program.vectorDb !== "none";
         // Can run the app if all tools do not require configuration
         if (
           !hasVectorDb &&
-          hasOpenAiKey &&
+          openAiKeyConfigured &&
+          llamaCloudKeyConfigured &&
           !toolsRequireConfig(program.tools) &&
           !program.llamapack
         ) {
@@ -605,11 +613,8 @@ export const askQuestions = async (
           {
             type: "text",
             name: "llamaCloudKey",
-            message: "Please provide your LlamaIndex Cloud API key:",
-            validate: (value) =>
-              value
-                ? true
-                : "LlamaIndex Cloud API key is required. You can get it from: https://cloud.llamaindex.ai/api-key",
+            message:
+              "Please provide your LlamaIndex Cloud API key (leave blank to skip):",
           },
           handlers,
         );
diff --git a/templates/components/loaders/python/llama_parse/loader.py b/templates/components/loaders/python/llama_parse/loader.py
index 51e8db39c2408375e04d804f38f420a89658429b..d74d7c03be73c3f2116e7d4ed648d401499e2b15 100644
--- a/templates/components/loaders/python/llama_parse/loader.py
+++ b/templates/components/loaders/python/llama_parse/loader.py
@@ -5,10 +5,8 @@ DATA_DIR = "data"  # directory containing the documents
 
 
 def get_documents():
-    parser = LlamaParse(
-        result_type="markdown",
-        verbose=True,
-    )
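+    # parse documents to markdown; pin the parser language to English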
+    parser = LlamaParse(result_type="markdown", verbose=True, language="en")
 
     reader = SimpleDirectoryReader(DATA_DIR, file_extractor={".pdf": parser})
     return reader.load_data()