diff --git a/.changeset/chilled-zebras-taste.md b/.changeset/chilled-zebras-taste.md
new file mode 100644
index 0000000000000000000000000000000000000000..7a87569a3c08a5027087fbbc059fe3c531a5fbad
--- /dev/null
+++ b/.changeset/chilled-zebras-taste.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Fix: programmatically ensure index for LlamaCloud
diff --git a/.changeset/hungry-chairs-tie.md b/.changeset/hungry-chairs-tie.md
new file mode 100644
index 0000000000000000000000000000000000000000..42f030babb2376eb8221b919dc9466db077d5612
--- /dev/null
+++ b/.changeset/hungry-chairs-tie.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Fix: .env not loaded when running `poetry run generate`
diff --git a/templates/components/multiagent/python/app/api/routers/chat.py b/templates/components/multiagent/python/app/api/routers/chat.py
index 23135c8093c818a52ef49d6e487e8701e3acf3ce..e2c808282d13f2522e576eb686ec74d087004b12 100644
--- a/templates/components/multiagent/python/app/api/routers/chat.py
+++ b/templates/components/multiagent/python/app/api/routers/chat.py
@@ -5,7 +5,7 @@ from app.api.routers.models import (
     ChatData,
 )
 from app.api.routers.vercel_response import VercelStreamResponse
-from app.engine import get_chat_engine
+from app.engine.engine import get_chat_engine
 from fastapi import APIRouter, BackgroundTasks, HTTPException, Request, status
 
 chat_router = r = APIRouter()
diff --git a/templates/components/vectordbs/python/llamacloud/generate.py b/templates/components/vectordbs/python/llamacloud/generate.py
index 41a0966737c2f6079cc9e1b7f263149a70517a8b..2efec0ee15971c5da08bce08e9c2ff98a3e35319 100644
--- a/templates/components/vectordbs/python/llamacloud/generate.py
+++ b/templates/components/vectordbs/python/llamacloud/generate.py
@@ -1,10 +1,14 @@
 # flake8: noqa: E402
+import os
 from dotenv import load_dotenv
 
-from app.engine.index import get_index
-
 load_dotenv()
 
+from app.engine.index import get_client, get_index
+from app.settings import init_settings
+from llama_cloud import PipelineType
+from llama_index.core.settings import Settings
+
 import logging
 from llama_index.core.readers import SimpleDirectoryReader
 from app.engine.service import LLamaCloudFileService
@@ -13,10 +20,50 @@ logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger()
 
 
+def ensure_index(index):
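+    """Ensure the named pipeline exists in LlamaCloud, creating it when missing."""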
+    project_id = index._get_project_id()
+    client = get_client()
+    pipelines = client.pipelines.search_pipelines(
+        project_id=project_id,
+        pipeline_name=index.name,
+        pipeline_type=PipelineType.MANAGED.value,
+    )
+    if len(pipelines) == 0:
+        from llama_index.embeddings.openai import OpenAIEmbedding
+
+        if not isinstance(Settings.embed_model, OpenAIEmbedding):
+            raise ValueError(
+                "Creating a new pipeline with a non-OpenAI embedding model is not supported."
+            )
+        client.pipelines.upsert_pipeline(
+            project_id=project_id,
+            request={
+                "name": index.name,
+                "embedding_config": {
+                    "type": "OPENAI_EMBEDDING",
+                    "component": {
+                        "api_key": os.getenv("OPENAI_API_KEY"),  # editable
+                        "model_name": os.getenv("EMBEDDING_MODEL"),
+                    },
+                },
+                "transform_config": {
+                    "mode": "auto",
+                    "config": {
+                        "chunk_size": Settings.chunk_size,  # editable
+                        "chunk_overlap": Settings.chunk_overlap,  # editable
+                    },
+                },
+            },
+        )
+
+
 def generate_datasource():
+    init_settings()
     logger.info("Generate index for the provided data")
 
     index = get_index()
+    ensure_index(index)
     project_id = index._get_project_id()
     pipeline_id = index._get_pipeline_id()
 
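With `ensure_index` in place, `poetry run generate` no longer assumes the pipeline was already created in the LlamaCloud UI: it searches for a managed pipeline by name and upserts one (OpenAI embeddings only) when the search comes back empty. A rough sketch of the resulting call order, assuming the `LLAMA_CLOUD_*`, `OPENAI_API_KEY`, and `EMBEDDING_MODEL` variables are set in `.env` (the `generate` import path is illustrative):

```python
from dotenv import load_dotenv

load_dotenv()  # must run before the app.* imports read the environment

from app.engine.index import get_index
from app.settings import init_settings
from generate import ensure_index  # illustrative import path for this module

init_settings()
index = get_index()
ensure_index(index)  # no-op if the pipeline exists; otherwise upserts an
                     # OPENAI_EMBEDDING pipeline with the current settings
print(index._get_pipeline_id())  # resolves only once the pipeline exists
```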
diff --git a/templates/components/vectordbs/python/llamacloud/index.py b/templates/components/vectordbs/python/llamacloud/index.py
index 570f7223265fb333dee0521fcd489da36541f1c6..cef8f90c46b293e66e120ba57408bd192e071a19 100644
--- a/templates/components/vectordbs/python/llamacloud/index.py
+++ b/templates/components/vectordbs/python/llamacloud/index.py
@@ -7,7 +7,7 @@ from llama_index.core.ingestion.api_utils import (
     get_client as llama_cloud_get_client,
 )
 from llama_index.indices.managed.llama_cloud import LlamaCloudIndex
-from pydantic import BaseModel, Field, validator
+from pydantic import BaseModel, Field, field_validator
 
 logger = logging.getLogger("uvicorn")
 
@@ -15,31 +15,26 @@ logger = logging.getLogger("uvicorn")
 class LlamaCloudConfig(BaseModel):
     # Private attributes
     api_key: str = Field(
-        default=os.getenv("LLAMA_CLOUD_API_KEY"),
         exclude=True,  # Exclude from the model representation
     )
     base_url: Optional[str] = Field(
-        default=os.getenv("LLAMA_CLOUD_BASE_URL"),
         exclude=True,
     )
     organization_id: Optional[str] = Field(
-        default=os.getenv("LLAMA_CLOUD_ORGANIZATION_ID"),
         exclude=True,
     )
     # Configuration attributes, can be set by the user
     pipeline: str = Field(
         description="The name of the pipeline to use",
-        default=os.getenv("LLAMA_CLOUD_INDEX_NAME"),
     )
     project: str = Field(
         description="The name of the LlamaCloud project",
-        default=os.getenv("LLAMA_CLOUD_PROJECT_NAME"),
     )
 
     # Validate and throw error if the env variables are not set before starting the app
-    @validator("pipeline", "project", "api_key", pre=True, always=True)
+    @field_validator("pipeline", "project", "api_key", mode="before")
     @classmethod
-    def validate_env_vars(cls, value):
+    def validate_fields(cls, value):
         if value is None:
             raise ValueError(
                 "Please set LLAMA_CLOUD_INDEX_NAME, LLAMA_CLOUD_PROJECT_NAME and LLAMA_CLOUD_API_KEY"
@@ -53,10 +48,20 @@ class LlamaCloudConfig(BaseModel):
             "base_url": self.base_url,
         }
 
+    @classmethod
+    def from_env(cls):
+        return cls(
+            api_key=os.getenv("LLAMA_CLOUD_API_KEY"),
+            base_url=os.getenv("LLAMA_CLOUD_BASE_URL"),
+            organization_id=os.getenv("LLAMA_CLOUD_ORGANIZATION_ID"),
+            pipeline=os.getenv("LLAMA_CLOUD_INDEX_NAME"),
+            project=os.getenv("LLAMA_CLOUD_PROJECT_NAME"),
+        )
+
 
 class IndexConfig(BaseModel):
     llama_cloud_pipeline_config: LlamaCloudConfig = Field(
-        default=LlamaCloudConfig(),
+        default_factory=LlamaCloudConfig.from_env,
         alias="llamaCloudPipeline",
     )
     callback_manager: Optional[CallbackManager] = Field(
@@ -83,5 +88,5 @@ def get_index(config: IndexConfig = None):
 
 
 def get_client():
-    config = LlamaCloudConfig()
+    config = LlamaCloudConfig.from_env()
     return llama_cloud_get_client(**config.to_client_kwargs())
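The core change above swaps field defaults computed at class-definition time (`default=os.getenv(...)`) for `LlamaCloudConfig.from_env` wired in through `default_factory`, which runs at instantiation time, after `load_dotenv()` has had a chance to populate the environment; the `field_validator` swap is the pydantic v2 equivalent of the old `validator`. A minimal standalone sketch of the difference (the `DEMO_NAME` variable is hypothetical, not project code):

```python
import os

from pydantic import BaseModel, Field


class Eager(BaseModel):
    # default captured once, when the class body executes
    name: str | None = os.getenv("DEMO_NAME")


class Lazy(BaseModel):
    # factory runs on every instantiation, so late env changes are picked up
    name: str | None = Field(default_factory=lambda: os.getenv("DEMO_NAME"))


os.environ["DEMO_NAME"] = "set-later"  # stands in for a late load_dotenv()
print(Eager().name)  # None: the stale default was baked in at import time
print(Lazy().name)   # "set-later": read lazily, after the env was populated
```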
diff --git a/templates/types/streaming/fastapi/app/api/routers/chat.py b/templates/types/streaming/fastapi/app/api/routers/chat.py
index 48876efb6f9b4faaf98dc23bddc50fd3d5c04f81..7e96c9274ea4ec1cfd4e3bfdb91c8f12368bfc20 100644
--- a/templates/types/streaming/fastapi/app/api/routers/chat.py
+++ b/templates/types/streaming/fastapi/app/api/routers/chat.py
@@ -13,7 +13,7 @@ from app.api.routers.models import (
     SourceNodes,
 )
 from app.api.routers.vercel_response import VercelStreamResponse
-from app.engine import get_chat_engine
+from app.engine.engine import get_chat_engine
 from app.engine.query_filter import generate_filters
 
 chat_router = r = APIRouter()
diff --git a/templates/types/streaming/fastapi/app/engine/__init__.py b/templates/types/streaming/fastapi/app/engine/__init__.py
index 1549f8b6f7453d9bb7ff7f765ebc7dfd640cc004..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/templates/types/streaming/fastapi/app/engine/__init__.py
+++ b/templates/types/streaming/fastapi/app/engine/__init__.py
@@ -1 +0,0 @@
-from .engine import get_chat_engine as get_chat_engine
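The emptied `app/engine/__init__.py` and the switch to importing `get_chat_engine` from `app.engine.engine` look like two halves of the same fix: with the old re-export, importing any `app.engine` submodule executed `engine.py`, including any module-level environment reads, before `generate.py` could call `load_dotenv()`. A minimal standalone repro of that pitfall (hypothetical `demo_pkg` layout, not project code):

```python
import os
from pathlib import Path

# Recreate the old layout: __init__.py re-exports from a sibling module
# that reads an environment variable at import time.
pkg = Path("demo_pkg")
pkg.mkdir(exist_ok=True)
(pkg / "__init__.py").write_text("from .engine import get_key as get_key\n")
(pkg / "engine.py").write_text(
    "import os\n"
    "API_KEY = os.getenv('DEMO_KEY')  # captured at import time\n"
    "def get_key():\n"
    "    return API_KEY\n"
)
(pkg / "index.py").write_text("def get_index():\n    return 'index'\n")

import demo_pkg.index  # importing ANY submodule runs __init__.py, then engine.py

os.environ["DEMO_KEY"] = "loaded"  # stands in for a load_dotenv() that runs late
from demo_pkg.engine import get_key

print(get_key())  # None: the env var was read before it was set; an empty
                  # __init__.py defers engine.py until it is imported directly
```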