diff --git a/.changeset/shaggy-rats-draw.md b/.changeset/shaggy-rats-draw.md
new file mode 100644
index 0000000000000000000000000000000000000000..d579556be6160adfda82fc0bad30fdb048421c28
--- /dev/null
+++ b/.changeset/shaggy-rats-draw.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Bump the LlamaCloud library and related LlamaIndex packages, and fix the resulting breaking changes (Python).
diff --git a/helpers/python.ts b/helpers/python.ts
index 4cd8cb75c5bf9d3a26c004b1202fa49fbe2408dc..5e08008b48a44a9f939d080256218cb8b081fa98 100644
--- a/helpers/python.ts
+++ b/helpers/python.ts
@@ -37,21 +37,21 @@ const getAdditionalDependencies = (
     case "mongo": {
       dependencies.push({
         name: "llama-index-vector-stores-mongodb",
-        version: "^0.3.1",
+        version: "^0.6.0",
       });
       break;
     }
     case "pg": {
       dependencies.push({
         name: "llama-index-vector-stores-postgres",
-        version: "^0.2.5",
+        version: "^0.3.2",
       });
       break;
     }
     case "pinecone": {
       dependencies.push({
         name: "llama-index-vector-stores-pinecone",
-        version: "^0.2.1",
+        version: "^0.4.1",
         constraints: {
           python: ">=3.11,<3.13",
         },
@@ -61,7 +61,7 @@ const getAdditionalDependencies = (
     case "milvus": {
       dependencies.push({
         name: "llama-index-vector-stores-milvus",
-        version: "^0.2.0",
+        version: "^0.3.0",
       });
       dependencies.push({
         name: "pymilvus",
@@ -72,14 +72,14 @@ const getAdditionalDependencies = (
     case "astra": {
       dependencies.push({
         name: "llama-index-vector-stores-astra-db",
-        version: "^0.2.0",
+        version: "^0.4.0",
       });
       break;
     }
     case "qdrant": {
       dependencies.push({
         name: "llama-index-vector-stores-qdrant",
-        version: "^0.3.0",
+        version: "^0.4.0",
         constraints: {
           python: ">=3.11,<3.13",
         },
@@ -89,21 +89,21 @@ const getAdditionalDependencies = (
     case "chroma": {
       dependencies.push({
         name: "llama-index-vector-stores-chroma",
-        version: "^0.2.0",
+        version: "^0.4.0",
       });
       break;
     }
     case "weaviate": {
       dependencies.push({
         name: "llama-index-vector-stores-weaviate",
-        version: "^1.1.1",
+        version: "^1.2.3",
       });
       break;
     }
     case "llamacloud":
       dependencies.push({
         name: "llama-index-indices-managed-llama-cloud",
-        version: "^0.6.0",
+        version: "^0.6.3",
       });
       break;
   }
@@ -122,13 +122,13 @@ const getAdditionalDependencies = (
         case "web":
           dependencies.push({
             name: "llama-index-readers-web",
-            version: "^0.2.2",
+            version: "^0.3.0",
           });
           break;
         case "db":
           dependencies.push({
             name: "llama-index-readers-database",
-            version: "^0.2.0",
+            version: "^0.3.0",
           });
           dependencies.push({
             name: "pymysql",
@@ -167,15 +167,15 @@ const getAdditionalDependencies = (
       if (templateType !== "multiagent") {
         dependencies.push({
           name: "llama-index-llms-openai",
-          version: "^0.2.0",
+          version: "^0.3.2",
         });
         dependencies.push({
           name: "llama-index-embeddings-openai",
-          version: "^0.2.3",
+          version: "^0.3.1",
         });
         dependencies.push({
           name: "llama-index-agent-openai",
-          version: "^0.3.0",
+          version: "^0.4.0",
         });
       }
       break;
@@ -524,7 +524,7 @@ export const installPythonTemplate = async ({
     if (observability === "llamatrace") {
       addOnDependencies.push({
         name: "llama-index-callbacks-arize-phoenix",
-        version: "^0.2.1",
+        version: "^0.3.0",
         constraints: {
           python: ">=3.11,<3.13",
         },
diff --git a/helpers/tools.ts b/helpers/tools.ts
index c3a34390c82a7194d2eca8dcb616d9e8bb4a824c..33bd96e607eef9c74321f5639c279909faeec70e 100644
--- a/helpers/tools.ts
+++ b/helpers/tools.ts
@@ -41,7 +41,7 @@ export const supportedTools: Tool[] = [
     dependencies: [
       {
         name: "llama-index-tools-google",
-        version: "^0.2.0",
+        version: "^0.3.0",
       },
     ],
     supportedFrameworks: ["fastapi"],
@@ -82,7 +82,7 @@ For better results, you can specify the region parameter to get results from a s
     dependencies: [
       {
         name: "llama-index-tools-wikipedia",
-        version: "^0.2.0",
+        version: "^0.3.0",
       },
     ],
     supportedFrameworks: ["fastapi", "express", "nextjs"],
diff --git a/templates/components/vectordbs/python/llamacloud/generate.py b/templates/components/vectordbs/python/llamacloud/generate.py
index acd28777eea7461fab06a37490f221487da9bad3..3932b86ab3ca1f2ae864e86d4ee4fa3cb5681f1d 100644
--- a/templates/components/vectordbs/python/llamacloud/generate.py
+++ b/templates/components/vectordbs/python/llamacloud/generate.py
@@ -1,5 +1,4 @@
 # flake8: noqa: E402
-import os
 
 from dotenv import load_dotenv
 
@@ -7,62 +6,24 @@ load_dotenv()
 
 import logging
 
-from app.engine.index import get_client, get_index
+from llama_index.core.readers import SimpleDirectoryReader
+from tqdm import tqdm
+
+from app.engine.index import get_index
 from app.engine.service import LLamaCloudFileService  # type: ignore
 from app.settings import init_settings
-from llama_cloud import PipelineType
-from llama_index.core.readers import SimpleDirectoryReader
-from llama_index.core.settings import Settings
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger()
 
 
-def ensure_index(index):
-    project_id = index._get_project_id()
-    client = get_client()
-    pipelines = client.pipelines.search_pipelines(
-        project_id=project_id,
-        pipeline_name=index.name,
-        pipeline_type=PipelineType.MANAGED.value,
-    )
-    if len(pipelines) == 0:
-        from llama_index.embeddings.openai import OpenAIEmbedding
-
-        if not isinstance(Settings.embed_model, OpenAIEmbedding):
-            raise ValueError(
-                "Creating a new pipeline with a non-OpenAI embedding model is not supported."
-            )
-        client.pipelines.upsert_pipeline(
-            project_id=project_id,
-            request={
-                "name": index.name,
-                "embedding_config": {
-                    "type": "OPENAI_EMBEDDING",
-                    "component": {
-                        "api_key": os.getenv("OPENAI_API_KEY"),  # editable
-                        "model_name": os.getenv("EMBEDDING_MODEL"),
-                    },
-                },
-                "transform_config": {
-                    "mode": "auto",
-                    "config": {
-                        "chunk_size": Settings.chunk_size,  # editable
-                        "chunk_overlap": Settings.chunk_overlap,  # editable
-                    },
-                },
-            },
-        )
-
-
 def generate_datasource():
     init_settings()
     logger.info("Generate index for the provided data")
 
-    index = get_index()
-    ensure_index(index)
-    project_id = index._get_project_id()
-    pipeline_id = index._get_pipeline_id()
+    index = get_index(create_if_missing=True)
+    if index is None:
+        raise ValueError("Index not found and could not be created")
 
     # use SimpleDirectoryReader to retrieve the files to process
     reader = SimpleDirectoryReader(
@@ -72,14 +33,30 @@ def generate_datasource():
     files_to_process = reader.input_files
 
     # add each file to the LlamaCloud pipeline
-    for input_file in files_to_process:
+    error_files = []
+    for input_file in tqdm(
+        files_to_process,
+        desc="Processing files",
+        unit="file",
+    ):
         with open(input_file, "rb") as f:
-            logger.info(
+            logger.debug(
                 f"Adding file {input_file} to pipeline {index.name} in project {index.project_name}"
             )
-            LLamaCloudFileService.add_file_to_pipeline(
-                project_id, pipeline_id, f, custom_metadata={}
-            )
+            try:
+                LLamaCloudFileService.add_file_to_pipeline(
+                    index.project.id,
+                    index.pipeline.id,
+                    f,
+                    custom_metadata={},
+                    wait_for_processing=False,
+                )
+            except Exception as e:
+                error_files.append(input_file)
+                logger.error(f"Error adding file {input_file}: {e}")
+
+    if error_files:
+        logger.error(f"Failed to add the following files: {error_files}")
 
     logger.info("Finished generating the index")
 
diff --git a/templates/components/vectordbs/python/llamacloud/index.py b/templates/components/vectordbs/python/llamacloud/index.py
index f6f7e4f67a0a05fc4c1f4f210d5d92a5e5127b51..97261900a70f0f785afb9c93914281ce418d35d0 100644
--- a/templates/components/vectordbs/python/llamacloud/index.py
+++ b/templates/components/vectordbs/python/llamacloud/index.py
@@ -2,10 +2,12 @@ import logging
 import os
 from typing import Optional
 
+from llama_cloud import PipelineType
 from llama_index.core.callbacks import CallbackManager
 from llama_index.core.ingestion.api_utils import (
     get_client as llama_cloud_get_client,
 )
+from llama_index.core.settings import Settings
 from llama_index.indices.managed.llama_cloud import LlamaCloudIndex
 from pydantic import BaseModel, Field, field_validator
 
@@ -82,14 +84,63 @@ class IndexConfig(BaseModel):
         }
 
 
-def get_index(config: IndexConfig = None):
+def get_index(
+    config: Optional[IndexConfig] = None,
+    create_if_missing: bool = False,
+) -> Optional[LlamaCloudIndex]:
     if config is None:
         config = IndexConfig()
-    index = LlamaCloudIndex(**config.to_index_kwargs())
-
-    return index
+    # Check whether the index exists: constructing LlamaCloudIndex raises a ValueError if it does not
+    try:
+        index = LlamaCloudIndex(**config.to_index_kwargs())
+        return index
+    except ValueError:
+        logger.warning("Index not found")
+        if create_if_missing:
+            logger.info("Creating index")
+            _create_index(config)
+            return LlamaCloudIndex(**config.to_index_kwargs())
+        return None
 
 
 def get_client():
     config = LlamaCloudConfig()
     return llama_cloud_get_client(**config.to_client_kwargs())
+
+
+def _create_index(
+    config: IndexConfig,
+):
+    client = get_client()
+    pipeline_name = config.llama_cloud_pipeline_config.pipeline
+
+    pipelines = client.pipelines.search_pipelines(
+        pipeline_name=pipeline_name,
+        pipeline_type=PipelineType.MANAGED.value,
+    )
+    if len(pipelines) == 0:
+        from llama_index.embeddings.openai import OpenAIEmbedding
+
+        if not isinstance(Settings.embed_model, OpenAIEmbedding):
+            raise ValueError(
+                "Creating a new pipeline with a non-OpenAI embedding model is not supported."
+            )
+        client.pipelines.upsert_pipeline(
+            request={
+                "name": pipeline_name,
+                "embedding_config": {
+                    "type": "OPENAI_EMBEDDING",
+                    "component": {
+                        "api_key": os.getenv("OPENAI_API_KEY"),  # editable
+                        "model_name": os.getenv("EMBEDDING_MODEL"),
+                    },
+                },
+                "transform_config": {
+                    "mode": "auto",
+                    "config": {
+                        "chunk_size": Settings.chunk_size,  # editable
+                        "chunk_overlap": Settings.chunk_overlap,  # editable
+                    },
+                },
+            },
+        )
diff --git a/templates/components/vectordbs/python/llamacloud/service.py b/templates/components/vectordbs/python/llamacloud/service.py
index 68216f98e2fc59ad88bdcec6578a14bccbd30995..31b91365d3ed252698dd1607770e69022206f327 100644
--- a/templates/components/vectordbs/python/llamacloud/service.py
+++ b/templates/components/vectordbs/python/llamacloud/service.py
@@ -1,18 +1,18 @@
-from io import BytesIO
 import logging
 import os
 import time
-from typing import Any, Dict, List, Optional, Set, Tuple, Union
 import typing
+from io import BytesIO
+from typing import Any, Dict, List, Optional, Set, Tuple, Union
 
+import requests
 from fastapi import BackgroundTasks
 from llama_cloud import ManagedIngestionStatus, PipelineFileCreateCustomMetadataValue
+from llama_index.core.schema import NodeWithScore
 from pydantic import BaseModel
-import requests
+
 from app.api.routers.models import SourceNodes
 from app.engine.index import get_client
-from llama_index.core.schema import NodeWithScore
-
 
 logger = logging.getLogger("uvicorn")
 
@@ -64,27 +64,34 @@ class LLamaCloudFileService:
         pipeline_id: str,
         upload_file: Union[typing.IO, Tuple[str, BytesIO]],
         custom_metadata: Optional[Dict[str, PipelineFileCreateCustomMetadataValue]],
+        wait_for_processing: bool = True,
     ) -> str:
         client = get_client()
         file = client.files.upload_file(project_id=project_id, upload_file=upload_file)
+        file_id = file.id
         files = [
             {
-                "file_id": file.id,
-                "custom_metadata": {"file_id": file.id, **(custom_metadata or {})},
+                "file_id": file_id,
+                "custom_metadata": {"file_id": file_id, **(custom_metadata or {})},
             }
         ]
         files = client.pipelines.add_files_to_pipeline(pipeline_id, request=files)
 
+        if not wait_for_processing:
+            return file_id
+
         # Wait 2s for the file to be processed
         max_attempts = 20
         attempt = 0
         while attempt < max_attempts:
-            result = client.pipelines.get_pipeline_file_status(pipeline_id, file.id)
+            result = client.pipelines.get_pipeline_file_status(
+                file_id=file_id, pipeline_id=pipeline_id
+            )
             if result.status == ManagedIngestionStatus.ERROR:
                 raise Exception(f"File processing failed: {str(result)}")
             if result.status == ManagedIngestionStatus.SUCCESS:
                 # File is ingested - return the file id
-                return file.id
+                return file_id
             attempt += 1
             time.sleep(0.1)  # Sleep for 100ms
         raise Exception(
diff --git a/templates/types/extractor/fastapi/app/ui/components/upload.py b/templates/types/extractor/fastapi/app/ui/components/upload.py
index 64421feb98d8cc0eaff55bb85ec6ed78b8ef86e1..e404840a17df1a0a8fde6b7513f84aa1869bb7d4 100644
--- a/templates/types/extractor/fastapi/app/ui/components/upload.py
+++ b/templates/types/extractor/fastapi/app/ui/components/upload.py
@@ -2,6 +2,7 @@ import os
 from typing import List
 
 import reflex as rx
+
 from app.engine.generate import generate_datasource
 
 
@@ -78,10 +79,10 @@ def upload_component() -> rx.Component:
             UploadedFilesState.uploaded_files,
             lambda file: rx.card(
                 rx.stack(
-                    rx.text(file.file_name, size="sm"),
+                    rx.text(file.file_name, size="2"),
                     rx.button(
                         "x",
-                        size="sm",
+                        size="2",
                         on_click=UploadedFilesState.remove_file(file.file_name),
                     ),
                     justify="between",
diff --git a/templates/types/extractor/fastapi/pyproject.toml b/templates/types/extractor/fastapi/pyproject.toml
index e9574a019a9a7ccdc9ce679a21b56be347a2b428..a9cad0f7666ca8ae19f4360cbb53683c18a592d3 100644
--- a/templates/types/extractor/fastapi/pyproject.toml
+++ b/templates/types/extractor/fastapi/pyproject.toml
@@ -14,7 +14,7 @@ fastapi = "^0.109.1"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
 python-dotenv = "^1.0.0"
 pydantic = "<2.10"
-llama-index = "^0.11.1"
+llama-index = "^0.12.1"
 cachetools = "^5.3.3"
 reflex = "^0.6.2.post1"
 
diff --git a/templates/types/streaming/fastapi/app/services/file.py b/templates/types/streaming/fastapi/app/services/file.py
index 7aa6696c3a484506ab83acd8f88b4c55343c1377..3fc1a64f10b786ab295e39aea7f229bea3c3194b 100644
--- a/templates/types/streaming/fastapi/app/services/file.py
+++ b/templates/types/streaming/fastapi/app/services/file.py
@@ -249,6 +249,7 @@ class FileService:
             index.pipeline.id,
             upload_file,
             custom_metadata={},
+            wait_for_processing=True,
         )
         return doc_id
 
diff --git a/templates/types/streaming/fastapi/pyproject.toml b/templates/types/streaming/fastapi/pyproject.toml
index 4d181fa6b9a2318017ab28b55da23d2d2db97a47..46672bc51591d5f028cb957fc50dd0c378201a48 100644
--- a/templates/types/streaming/fastapi/pyproject.toml
+++ b/templates/types/streaming/fastapi/pyproject.toml
@@ -19,7 +19,7 @@ python-dotenv = "^1.0.0"
 pydantic = "<2.10"
 aiostream = "^0.5.2"
 cachetools = "^5.3.3"
-llama-index = "^0.11.17"
+llama-index = "^0.12.1"
 rich = "^13.9.4"
 
 [tool.poetry.group.dev.dependencies]