diff --git a/.changeset/selfish-tips-lie.md b/.changeset/selfish-tips-lie.md
new file mode 100644
index 0000000000000000000000000000000000000000..b2c2ab7a3aef540d44bea5a381ba7ce314b6d078
--- /dev/null
+++ b/.changeset/selfish-tips-lie.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Simplify and unify file upload handling
diff --git a/templates/components/engines/python/agent/tools/interpreter.py b/templates/components/engines/python/agent/tools/interpreter.py
index 9d19ea883136231984fdbb7cff84db4ca827881b..e108bd9bb9e9cc64648679092732b6e17f337933 100644
--- a/templates/components/engines/python/agent/tools/interpreter.py
+++ b/templates/components/engines/python/agent/tools/interpreter.py
@@ -4,7 +4,7 @@ import os
 import uuid
 from typing import List, Optional
 
-from app.engine.utils.file_helper import FileMetadata, save_file
+from app.services.file import DocumentFile, FileService
 from e2b_code_interpreter import CodeInterpreter
 from e2b_code_interpreter.models import Logs
 from llama_index.core.tools import FunctionTool
@@ -32,7 +32,7 @@ class E2BCodeInterpreter:
     output_dir = "output/tools"
     uploaded_files_dir = "output/uploaded"
 
-    def __init__(self, api_key: str = None):
+    def __init__(self, api_key: Optional[str] = None):
         if api_key is None:
             api_key = os.getenv("E2B_API_KEY")
         filesever_url_prefix = os.getenv("FILESERVER_URL_PREFIX")
@@ -72,15 +72,17 @@ class E2BCodeInterpreter:
                         self.interpreter.files.write(file_path, content)
             logger.info(f"Uploaded {len(sandbox_files)} files to sandbox")
 
-    def _save_to_disk(self, base64_data: str, ext: str) -> FileMetadata:
+    def _save_to_disk(self, base64_data: str, ext: str) -> DocumentFile:
         buffer = base64.b64decode(base64_data)
 
-        filename = f"{uuid.uuid4()}.{ext}"  # generate a unique filename
-        output_path = os.path.join(self.output_dir, filename)
+        # Output from e2b doesn't have a name. Create a random name for it.
+        filename = f"e2b_file_{uuid.uuid4()}.{ext}"
 
-        file_metadata = save_file(buffer, file_path=output_path)
+        document_file = FileService.save_file(
+            buffer, file_name=filename, save_dir=self.output_dir
+        )
 
-        return file_metadata
+        return document_file
 
     def _parse_result(self, result) -> List[InterpreterExtraResult]:
         """
@@ -99,12 +101,12 @@ class E2BCodeInterpreter:
             for ext, data in zip(formats, results):
                 match ext:
                     case "png" | "svg" | "jpeg" | "pdf":
-                        file_metadata = self._save_to_disk(data, ext)
+                        document_file = self._save_to_disk(data, ext)
                         output.append(
                             InterpreterExtraResult(
                                 type=ext,
-                                filename=file_metadata.name,
-                                url=file_metadata.url,
+                                filename=document_file.name,
+                                url=document_file.url,
                             )
                         )
                     case _:
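A minimal sketch of the new save flow for reference, assuming the `FileService.save_file` signature introduced in `app/services/file.py` below; the bytes and file name here are illustrative:

```python
from app.services.file import FileService

# Hypothetical bytes standing in for a decoded e2b result
document_file = FileService.save_file(
    b"<png bytes>", file_name="e2b_file_example.png", save_dir="output/tools"
)
print(document_file.name)  # "e2b_file_example_<uuid4>.png"
print(document_file.url)   # "<FILESERVER_URL_PREFIX>/output/tools/<stored name>"
```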
diff --git a/templates/components/engines/typescript/agent/tools/interpreter.ts b/templates/components/engines/typescript/agent/tools/interpreter.ts
index ae386a13f7ed1d9fa93623525aadb5f049e56f3b..44cc7cbc4d3ea3ea18ec28b461b7caa0ce35b08f 100644
--- a/templates/components/engines/typescript/agent/tools/interpreter.ts
+++ b/templates/components/engines/typescript/agent/tools/interpreter.ts
@@ -111,13 +111,16 @@ export class InterpreterTool implements BaseTool<InterpreterParameter> {
     // upload files to sandbox
     if (input.sandboxFiles) {
       console.log(`Uploading ${input.sandboxFiles.length} files to sandbox`);
-      for (const filePath of input.sandboxFiles) {
-        const fileName = path.basename(filePath);
-        const localFilePath = path.join(this.uploadedFilesDir, fileName);
-        const content = fs.readFileSync(localFilePath);
-        await this.codeInterpreter?.files.write(filePath, content);
+      try {
+        for (const filePath of input.sandboxFiles) {
+          const fileName = path.basename(filePath);
+          const localFilePath = path.join(this.uploadedFilesDir, fileName);
+          const content = fs.readFileSync(localFilePath);
+          await this.codeInterpreter?.files.write(filePath, content);
+        }
+      } catch (error) {
+        console.error("Got error when uploading files to sandbox", error);
       }
-      console.log(`Uploaded ${input.sandboxFiles.length} files to sandbox`);
     }
     return this.codeInterpreter;
   }
diff --git a/templates/components/llamaindex/typescript/documents/helper.ts b/templates/components/llamaindex/typescript/documents/helper.ts
index 52cc5d94326bba80254683c917b7ddf416d03a2b..44e0520b722dcb3a7fb8928bdd215006841be961 100644
--- a/templates/components/llamaindex/typescript/documents/helper.ts
+++ b/templates/components/llamaindex/typescript/documents/helper.ts
@@ -3,6 +3,7 @@ import crypto from "node:crypto";
 import fs from "node:fs";
 import path from "node:path";
 import { getExtractors } from "../../engine/loader";
+import { DocumentFile } from "../streaming/annotations";
 
 const MIME_TYPE_TO_EXT: Record<string, string> = {
   "application/pdf": "pdf",
@@ -14,27 +15,20 @@ const MIME_TYPE_TO_EXT: Record<string, string> = {
 
 const UPLOADED_FOLDER = "output/uploaded";
 
-export type FileMetadata = {
-  id: string;
-  name: string;
-  url: string;
-  refs: string[];
-};
-
 export async function storeAndParseFile(
-  filename: string,
+  name: string,
   fileBuffer: Buffer,
   mimeType: string,
-): Promise<FileMetadata> {
-  const fileMetadata = await storeFile(filename, fileBuffer, mimeType);
-  const documents: Document[] = await parseFile(fileBuffer, filename, mimeType);
+): Promise<DocumentFile> {
+  const file = await storeFile(name, fileBuffer, mimeType);
+  const documents: Document[] = await parseFile(fileBuffer, name, mimeType);
   // Update document IDs in the file metadata
-  fileMetadata.refs = documents.map((document) => document.id_ as string);
-  return fileMetadata;
+  file.refs = documents.map((document) => document.id_ as string);
+  return file;
 }
 
 export async function storeFile(
-  filename: string,
+  name: string,
   fileBuffer: Buffer,
   mimeType: string,
 ) {
@@ -42,15 +36,17 @@ export async function storeFile(
   if (!fileExt) throw new Error(`Unsupported document type: ${mimeType}`);
 
   const fileId = crypto.randomUUID();
-  const newFilename = `${fileId}_${sanitizeFileName(filename)}`;
+  const newFilename = `${sanitizeFileName(name)}_${fileId}.${fileExt}`;
   const filepath = path.join(UPLOADED_FOLDER, newFilename);
   const fileUrl = await saveDocument(filepath, fileBuffer);
   return {
     id: fileId,
     name: newFilename,
+    size: fileBuffer.length,
+    type: fileExt,
     url: fileUrl,
     refs: [] as string[],
-  } as FileMetadata;
+  } as DocumentFile;
 }
 
 export async function parseFile(
@@ -104,5 +100,6 @@ export async function saveDocument(filepath: string, content: string | Buffer) {
 }
 
 function sanitizeFileName(fileName: string) {
-  return fileName.replace(/[^a-zA-Z0-9_.-]/g, "_");
+  // Remove file extension and sanitize
+  return fileName.split(".")[0].replace(/[^a-zA-Z0-9_-]/g, "_");
 }
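Both runtimes now store uploads as `<sanitized base name>_<uuid4>.<ext>`. A small Python sketch of the convention, modeled on `sanitizeFileName` above (the Python helper also keeps dots; everything else matches):

```python
import re
import uuid

def stored_file_name(original_name: str, ext: str) -> str:
    base = original_name.split(".")[0]  # drop the extension, like the TS helper
    sanitized = re.sub(r"[^a-zA-Z0-9_-]", "_", base)
    return f"{sanitized}_{uuid.uuid4()}.{ext}"

print(stored_file_name("Q3 report.pdf", "pdf"))  # e.g. "Q3_report_<uuid4>.pdf"
```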
diff --git a/templates/components/llamaindex/typescript/documents/upload.ts b/templates/components/llamaindex/typescript/documents/upload.ts
index 158b05a1ac8decfa512c166ae93dedac20d3a724..10091ce007c34eb9325facde33a9d503f04cd3d5 100644
--- a/templates/components/llamaindex/typescript/documents/upload.ts
+++ b/templates/components/llamaindex/typescript/documents/upload.ts
@@ -2,40 +2,40 @@ import { Document, LLamaCloudFileService, VectorStoreIndex } from "llamaindex";
 import { LlamaCloudIndex } from "llamaindex/cloud/LlamaCloudIndex";
 import fs from "node:fs/promises";
 import path from "node:path";
-import { FileMetadata, parseFile, storeFile } from "./helper";
+import { DocumentFile } from "../streaming/annotations";
+import { parseFile, storeFile } from "./helper";
 import { runPipeline } from "./pipeline";
 
 export async function uploadDocument(
   index: VectorStoreIndex | LlamaCloudIndex | null,
-  filename: string,
+  name: string,
   raw: string,
-): Promise<FileMetadata> {
+): Promise<DocumentFile> {
   const [header, content] = raw.split(",");
   const mimeType = header.replace("data:", "").replace(";base64", "");
   const fileBuffer = Buffer.from(content, "base64");
 
   // Store file
-  const fileMetadata = await storeFile(filename, fileBuffer, mimeType);
+  const fileMetadata = await storeFile(name, fileBuffer, mimeType);
 
   // If the file is csv and has codeExecutorTool, we don't need to index the file.
   if (mimeType === "text/csv" && (await hasCodeExecutorTool())) {
     return fileMetadata;
   }
-
+  let documentIds: string[] = [];
   if (index instanceof LlamaCloudIndex) {
     // trigger LlamaCloudIndex API to upload the file and run the pipeline
     const projectId = await index.getProjectId();
     const pipelineId = await index.getPipelineId();
     try {
-      const documentId = await LLamaCloudFileService.addFileToPipeline(
-        projectId,
-        pipelineId,
-        new File([fileBuffer], filename, { type: mimeType }),
-        { private: "true" },
-      );
-      // Update file metadata with document IDs
-      fileMetadata.refs = [documentId];
-      return fileMetadata;
+      documentIds = [
+        await LLamaCloudFileService.addFileToPipeline(
+          projectId,
+          pipelineId,
+          new File([fileBuffer], name, { type: mimeType }),
+          { private: "true" },
+        ),
+      ];
     } catch (error) {
       if (
         error instanceof ReferenceError &&
@@ -47,14 +47,14 @@ export async function uploadDocument(
       }
       throw error;
     }
+  } else {
+    // run the pipeline for other vector store indexes
+    const documents: Document[] = await parseFile(fileBuffer, name, mimeType);
+    documentIds = await runPipeline(index, documents);
   }
 
-  // run the pipeline for other vector store indexes
-  const documents: Document[] = await parseFile(fileBuffer, filename, mimeType);
   // Update file metadata with document IDs
-  fileMetadata.refs = documents.map((document) => document.id_ as string);
-  // Run the pipeline
-  await runPipeline(index, documents);
+  fileMetadata.refs = documentIds;
   return fileMetadata;
 }
 
diff --git a/templates/components/llamaindex/typescript/streaming/annotations.ts b/templates/components/llamaindex/typescript/streaming/annotations.ts
index f8de88f8b611669aa7b1470ef66539a1de78f9b4..bf7e46ab55d8a7275c5ce1dfd766578dcfcd01b2 100644
--- a/templates/components/llamaindex/typescript/streaming/annotations.ts
+++ b/templates/components/llamaindex/typescript/streaming/annotations.ts
@@ -3,17 +3,13 @@ import { MessageContent, MessageContentDetail } from "llamaindex";
 
 export type DocumentFileType = "csv" | "pdf" | "txt" | "docx";
 
-export type UploadedFileMeta = {
+export type DocumentFile = {
   id: string;
   name: string;
-  url?: string;
-  refs?: string[];
-};
-
-export type DocumentFile = {
-  type: DocumentFileType;
+  size: number;
+  type: string;
   url: string;
-  metadata: UploadedFileMeta;
+  refs?: string[];
 };
 
 type Annotation = {
@@ -30,7 +26,7 @@ export function isValidMessages(messages: Message[]): boolean {
 export function retrieveDocumentIds(messages: Message[]): string[] {
   // retrieve document Ids from the annotations of all messages (if any)
   const documentFiles = retrieveDocumentFiles(messages);
-  return documentFiles.map((file) => file.metadata?.refs || []).flat();
+  return documentFiles.flatMap((file) => file.refs ?? []);
 }
 
 export function retrieveDocumentFiles(messages: Message[]): DocumentFile[] {
@@ -63,16 +59,15 @@ export function retrieveMessageContent(messages: Message[]): MessageContent {
 }
 
 function getFileContent(file: DocumentFile): string {
-  const fileMetadata = file.metadata;
-  let defaultContent = `=====File: ${fileMetadata.name}=====\n`;
+  let defaultContent = `=====File: ${file.name}=====\n`;
   // Include file URL if it's available
   const urlPrefix = process.env.FILESERVER_URL_PREFIX;
   let urlContent = "";
   if (urlPrefix) {
-    if (fileMetadata.url) {
-      urlContent = `File URL: ${fileMetadata.url}\n`;
+    if (file.url) {
+      urlContent = `File URL: ${file.url}\n`;
     } else {
-      urlContent = `File URL (instruction: do not update this file URL yourself): ${urlPrefix}/output/uploaded/${fileMetadata.name}\n`;
+      urlContent = `File URL (instruction: do not update this file URL yourself): ${urlPrefix}/output/uploaded/${file.name}\n`;
     }
   } else {
     console.warn(
@@ -82,11 +77,11 @@ function getFileContent(file: DocumentFile): string {
   defaultContent += urlContent;
 
   // Include document IDs if it's available
-  if (fileMetadata.refs) {
-    defaultContent += `Document IDs: ${fileMetadata.refs}\n`;
+  if (file.refs) {
+    defaultContent += `Document IDs: ${file.refs}\n`;
   }
   // Include sandbox file paths
-  const sandboxFilePath = `/tmp/${fileMetadata.name}`;
+  const sandboxFilePath = `/tmp/${file.name}`;
   defaultContent += `Sandbox file path (instruction: only use sandbox path for artifact or code interpreter tool): ${sandboxFilePath}\n`;
 
   return defaultContent;
diff --git a/templates/components/routers/python/sandbox.py b/templates/components/routers/python/sandbox.py
index 9efe146fd9c3158a99c5774ed4adb57812871340..0b07422ee89136f574a7ced2b08705f1205e04cd 100644
--- a/templates/components/routers/python/sandbox.py
+++ b/templates/components/routers/python/sandbox.py
@@ -172,10 +172,14 @@ def _download_cell_results(cell_results: Optional[List]) -> List[Dict[str, str]]
                 data = result[ext]
 
                 if ext in ["png", "svg", "jpeg", "pdf"]:
-                    file_path = os.path.join("output", "tools", f"{uuid.uuid4()}.{ext}")
                     base64_data = data
                     buffer = base64.b64decode(base64_data)
-                    file_meta = save_file(content=buffer, file_path=file_path)
+                    file_name = f"{uuid.uuid4()}.{ext}"
+                    file_meta = save_file(
+                        content=buffer,
+                        file_name=file_name,
+                        save_dir=os.path.join("output", "tools"),
+                    )
                     output.append(
                         {
                             "type": ext,
diff --git a/templates/components/services/python/file.py b/templates/components/services/python/file.py
deleted file mode 100644
index 3e9ad3e64ae90dc5188bde3f5d702aa062b3a828..0000000000000000000000000000000000000000
--- a/templates/components/services/python/file.py
+++ /dev/null
@@ -1,198 +0,0 @@
-import base64
-import mimetypes
-import os
-import re
-import uuid
-from io import BytesIO
-from pathlib import Path
-from typing import Dict, List, Optional, Tuple
-
-from app.engine.index import IndexConfig, get_index
-from app.engine.utils.file_helper import FileMetadata, save_file
-from llama_index.core import VectorStoreIndex
-from llama_index.core.ingestion import IngestionPipeline
-from llama_index.core.readers.file.base import (
-    _try_loading_included_file_formats as get_file_loaders_map,
-)
-from llama_index.core.schema import Document
-from llama_index.core.tools.function_tool import FunctionTool
-from llama_index.indices.managed.llama_cloud.base import LlamaCloudIndex
-from llama_index.readers.file import FlatReader
-
-
-def get_llamaparse_parser():
-    from app.engine.loaders import load_configs
-    from app.engine.loaders.file import FileLoaderConfig, llama_parse_parser
-
-    config = load_configs()
-    file_loader_config = FileLoaderConfig(**config["file"])
-    if file_loader_config.use_llama_parse:
-        return llama_parse_parser()
-    else:
-        return None
-
-
-def default_file_loaders_map():
-    default_loaders = get_file_loaders_map()
-    default_loaders[".txt"] = FlatReader
-    default_loaders[".csv"] = FlatReader
-    return default_loaders
-
-
-class PrivateFileService:
-    """
-    To store the files uploaded by the user and add them to the index.
-    """
-
-    PRIVATE_STORE_PATH = "output/uploaded"
-
-    @staticmethod
-    def _preprocess_base64_file(base64_content: str) -> Tuple[bytes, str | None]:
-        header, data = base64_content.split(",", 1)
-        mime_type = header.split(";")[0].split(":", 1)[1]
-        extension = mimetypes.guess_extension(mime_type)
-        # File data as bytes
-        return base64.b64decode(data), extension
-
-    @staticmethod
-    def _store_file(file_name, file_data) -> FileMetadata:
-        """
-        Store the file to the private directory and return the file metadata
-        """
-        # Store file to the private directory
-        os.makedirs(PrivateFileService.PRIVATE_STORE_PATH, exist_ok=True)
-        file_path = Path(os.path.join(PrivateFileService.PRIVATE_STORE_PATH, file_name))
-
-        return save_file(file_data, file_path=str(file_path))
-
-    @staticmethod
-    def _load_file_to_documents(file_metadata: FileMetadata) -> List[Document]:
-        """
-        Load the file from the private directory and return the documents
-        """
-        _, extension = os.path.splitext(file_metadata.name)
-        extension = extension.lstrip(".")
-
-        # Load file to documents
-        # If LlamaParse is enabled, use it to parse the file
-        # Otherwise, use the default file loaders
-        reader = get_llamaparse_parser()
-        if reader is None:
-            reader_cls = default_file_loaders_map().get(f".{extension}")
-            if reader_cls is None:
-                raise ValueError(f"File extension {extension} is not supported")
-            reader = reader_cls()
-        documents = reader.load_data(Path(file_metadata.path))
-        # Add custom metadata
-        for doc in documents:
-            doc.metadata["file_name"] = file_metadata.name
-            doc.metadata["private"] = "true"
-        return documents
-
-    @staticmethod
-    def _add_documents_to_vector_store_index(
-        documents: List[Document], index: VectorStoreIndex
-    ) -> None:
-        """
-        Add the documents to the vector store index
-        """
-        pipeline = IngestionPipeline()
-        nodes = pipeline.run(documents=documents)
-
-        # Add the nodes to the index and persist it
-        if index is None:
-            index = VectorStoreIndex(nodes=nodes)
-        else:
-            index.insert_nodes(nodes=nodes)
-        index.storage_context.persist(
-            persist_dir=os.environ.get("STORAGE_DIR", "storage")
-        )
-
-    @staticmethod
-    def _add_file_to_llama_cloud_index(
-        index: LlamaCloudIndex,
-        file_name: str,
-        file_data: bytes,
-    ) -> str:
-        """
-        Add the file to the LlamaCloud index.
-        LlamaCloudIndex is a managed index so we can directly use the files.
-        """
-        try:
-            from app.engine.service import LLamaCloudFileService
-        except ImportError:
-            raise ValueError("LlamaCloudFileService is not found")
-
-        project_id = index._get_project_id()
-        pipeline_id = index._get_pipeline_id()
-        # LlamaCloudIndex is a managed index so we can directly use the files
-        upload_file = (file_name, BytesIO(file_data))
-        doc_id = LLamaCloudFileService.add_file_to_pipeline(
-            project_id,
-            pipeline_id,
-            upload_file,
-            custom_metadata={},
-        )
-        return doc_id
-
-    @staticmethod
-    def _sanitize_file_name(file_name: str) -> str:
-        file_name, extension = os.path.splitext(file_name)
-        return re.sub(r"[^a-zA-Z0-9]", "_", file_name) + extension
-
-    @classmethod
-    def process_file(
-        cls,
-        file_name: str,
-        base64_content: str,
-        params: Optional[dict] = None,
-    ) -> FileMetadata:
-        if params is None:
-            params = {}
-
-        # Add the nodes to the index and persist it
-        index_config = IndexConfig(**params)
-        index = get_index(index_config)
-
-        # Generate a new file name if the same file is uploaded multiple times
-        file_id = str(uuid.uuid4())
-        new_file_name = f"{file_id}_{cls._sanitize_file_name(file_name)}"
-
-        # Preprocess and store the file
-        file_data, extension = cls._preprocess_base64_file(base64_content)
-        file_metadata = cls._store_file(new_file_name, file_data)
-
-        tools = cls._get_available_tools()
-        code_executor_tools = ["interpreter", "artifact"]
-        # If the file is CSV and there is a code executor tool, we don't need to index.
-        if extension == ".csv" and any(tool in tools for tool in code_executor_tools):
-            return file_metadata
-        else:
-            # Insert the file into the index and update document ids to the file metadata
-            if isinstance(index, LlamaCloudIndex):
-                doc_id = cls._add_file_to_llama_cloud_index(
-                    index, new_file_name, file_data
-                )
-                # Add document ids to the file metadata
-                file_metadata.refs = [doc_id]
-            else:
-                documents = cls._load_file_to_documents(file_metadata)
-                cls._add_documents_to_vector_store_index(documents, index)
-                # Add document ids to the file metadata
-                file_metadata.refs = [doc.doc_id for doc in documents]
-
-        # Return the file metadata
-        return file_metadata
-
-    @staticmethod
-    def _get_available_tools() -> Dict[str, List[FunctionTool]]:
-        try:
-            from app.engine.tools import ToolFactory
-
-            tools = ToolFactory.from_env(map_result=True)
-            return tools
-        except ImportError:
-            # There is no tool code
-            return {}
-        except Exception as e:
-            raise ValueError(f"Failed to get available tools: {e}") from e
diff --git a/templates/types/streaming/express/src/controllers/chat-upload.controller.ts b/templates/types/streaming/express/src/controllers/chat-upload.controller.ts
index 70dfa2feb24a1eb078a7407f2ccbf022ecf9130d..9477f3ff190cdad6018d6cf534fa8742b308554c 100644
--- a/templates/types/streaming/express/src/controllers/chat-upload.controller.ts
+++ b/templates/types/streaming/express/src/controllers/chat-upload.controller.ts
@@ -4,11 +4,11 @@ import { uploadDocument } from "./llamaindex/documents/upload";
 
 export const chatUpload = async (req: Request, res: Response) => {
   const {
-    filename,
+    name,
     base64,
     params,
-  }: { filename: string; base64: string; params?: any } = req.body;
-  if (!base64 || !filename) {
+  }: { name: string; base64: string; params?: any } = req.body;
+  if (!base64 || !name) {
     return res.status(400).json({
       error: "base64 and filename is required in the request body",
     });
@@ -20,5 +20,5 @@ export const chatUpload = async (req: Request, res: Response) => {
         "StorageContext is empty - call 'npm run generate' to generate the storage first",
     });
   }
-  return res.status(200).json(await uploadDocument(index, filename, base64));
+  return res.status(200).json(await uploadDocument(index, name, base64));
 };
diff --git a/templates/types/streaming/fastapi/app/api/routers/chat_config.py b/templates/types/streaming/fastapi/app/api/routers/chat_config.py
index 228664d3db541d5ec06463c1cd9f87a2421c9813..495ee99be18e9a4660c7da6a6cc68554b5d7ab1a 100644
--- a/templates/types/streaming/fastapi/app/api/routers/chat_config.py
+++ b/templates/types/streaming/fastapi/app/api/routers/chat_config.py
@@ -10,6 +10,42 @@ config_router = r = APIRouter()
 logger = logging.getLogger("uvicorn")
 
 
+def _is_llama_cloud_service_configured():
+    try:
+        from app.engine.service import LLamaCloudFileService  # noqa
+
+        return True
+    except ImportError:
+        return False
+
+
+async def chat_llama_cloud_config():
+    from app.engine.service import LLamaCloudFileService
+
+    if not os.getenv("LLAMA_CLOUD_API_KEY"):
+        raise HTTPException(
+            status_code=500, detail="LlamaCloud API KEY is not configured"
+        )
+    projects = LLamaCloudFileService.get_all_projects_with_pipelines()
+    pipeline = os.getenv("LLAMA_CLOUD_INDEX_NAME")
+    project = os.getenv("LLAMA_CLOUD_PROJECT_NAME")
+    pipeline_config = None
+    if pipeline and project:
+        pipeline_config = {
+            "pipeline": pipeline,
+            "project": project,
+        }
+    return {
+        "projects": projects,
+        "pipeline": pipeline_config,
+    }
+
+
+if _is_llama_cloud_service_configured():
+    logger.info("LlamaCloud is configured. Adding /config/llamacloud route.")
+    r.add_api_route("/llamacloud", chat_llama_cloud_config, methods=["GET"])
+
+
 @r.get("")
 async def chat_config() -> ChatConfig:
     starter_questions = None
@@ -17,33 +53,3 @@ async def chat_config() -> ChatConfig:
     if conversation_starters and conversation_starters.strip():
         starter_questions = conversation_starters.strip().split("\n")
     return ChatConfig(starter_questions=starter_questions)
-
-
-try:
-    from app.engine.service import LLamaCloudFileService
-
-    print("LlamaCloud is configured. Adding /config/llamacloud route.")
-
-    @r.get("/llamacloud")
-    async def chat_llama_cloud_config():
-        if not os.getenv("LLAMA_CLOUD_API_KEY"):
-            raise HTTPException(
-                status_code=500, detail="LlamaCloud API KEY is not configured"
-            )
-        projects = LLamaCloudFileService.get_all_projects_with_pipelines()
-        pipeline = os.getenv("LLAMA_CLOUD_INDEX_NAME")
-        project = os.getenv("LLAMA_CLOUD_PROJECT_NAME")
-        pipeline_config = None
-        if pipeline and project:
-            pipeline_config = {
-                "pipeline": pipeline,
-                "project": project,
-            }
-        return {
-            "projects": projects,
-            "pipeline": pipeline_config,
-        }
-
-except ImportError:
-    print("LlamaCloud is not configured. Skipping adding /config/llamacloud route.")
-    pass
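With the route now registered at import time, here is a hedged example of the endpoint's response shape; the `/api/chat/config` mount point and port are assumptions based on the log message and the dev server defaults:

```python
import requests  # assumes the FastAPI dev server on port 8000

resp = requests.get("http://localhost:8000/api/chat/config/llamacloud")
print(resp.json())
# {
#   "projects": [...],  # every LlamaCloud project with its pipelines
#   "pipeline": {"pipeline": "<LLAMA_CLOUD_INDEX_NAME>", "project": "<LLAMA_CLOUD_PROJECT_NAME>"}
# }
# "pipeline" is null unless both environment variables are set.
```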
diff --git a/templates/types/streaming/fastapi/app/api/routers/models.py b/templates/types/streaming/fastapi/app/api/routers/models.py
index 3bbe7b6e7a1af4d5cb5bb62030d13ce322814c7a..a8bb991eb746577a14014b2f3d1e70df0f8b6df9 100644
--- a/templates/types/streaming/fastapi/app/api/routers/models.py
+++ b/templates/types/streaming/fastapi/app/api/routers/models.py
@@ -8,79 +8,70 @@ from pydantic import BaseModel, Field, validator
 from pydantic.alias_generators import to_camel
 
 from app.config import DATA_DIR
+from app.services.file import DocumentFile
 
 logger = logging.getLogger("uvicorn")
 
 
-class FileMetadata(BaseModel):
-    id: str
-    name: str
-    url: Optional[str] = None
-    refs: Optional[List[str]] = None
+class AnnotationFileData(BaseModel):
+    files: List[DocumentFile] = Field(
+        default=[],
+        description="List of files",
+    )
+
+    class Config:
+        json_schema_extra = {
+            "example": {
+                "files": [
+                    {
+                        "content": "data:text/plain;base64,aGVsbG8gd29ybGQK=",
+                        "name": "example.txt",
+                    }
+                ]
+            }
+        }
+        alias_generator = to_camel
 
-    def _get_url_llm_content(self) -> Optional[str]:
+    @staticmethod
+    def _get_url_llm_content(file: DocumentFile) -> Optional[str]:
         url_prefix = os.getenv("FILESERVER_URL_PREFIX")
         if url_prefix:
-            if self.url is not None:
-                return f"File URL: {self.url}\n"
+            if file.url is not None:
+                return f"File URL: {file.url}\n"
             else:
                 # Construct url from file name
-                return f"File URL (instruction: do not update this file URL yourself): {url_prefix}/output/uploaded/{self.name}\n"
+                return f"File URL (instruction: do not update this file URL yourself): {url_prefix}/output/uploaded/{file.name}\n"
         else:
             logger.warning(
                 "Warning: FILESERVER_URL_PREFIX not set in environment variables. Can't use file server"
             )
             return None
 
-    def to_llm_content(self) -> str:
+    @classmethod
+    def _get_file_content(cls, file: DocumentFile) -> str:
         """
         Construct content for LLM from the file metadata
         """
-        default_content = f"=====File: {self.name}=====\n"
+        default_content = f"=====File: {file.name}=====\n"
         # Include file URL if it's available
-        url_content = self._get_url_llm_content()
+        url_content = cls._get_url_llm_content(file)
         if url_content:
             default_content += url_content
         # Include document IDs if it's available
-        if self.refs is not None:
-            default_content += f"Document IDs: {self.refs}\n"
+        if file.refs is not None:
+            default_content += f"Document IDs: {file.refs}\n"
         # Include sandbox file path
-        sandbox_file_path = f"/tmp/{self.name}"
+        sandbox_file_path = f"/tmp/{file.name}"
         default_content += f"Sandbox file path (instruction: only use sandbox path for artifact or code interpreter tool): {sandbox_file_path}\n"
         return default_content
 
-
-class File(BaseModel):
-    filetype: str
-    metadata: FileMetadata
-
-    def _load_file_content(self) -> str:
-        file_path = f"output/uploaded/{self.metadata.name}"
-        with open(file_path, "r") as file:
-            return file.read()
-
-
-class AnnotationFileData(BaseModel):
-    files: List[File] = Field(
-        default=[],
-        description="List of files",
-    )
-
-    class Config:
-        json_schema_extra = {
-            "example": {
-                "csvFiles": [
-                    {
-                        "content": "Name, Age\nAlice, 25\nBob, 30",
-                        "filename": "example.csv",
-                        "filesize": 123,
-                        "id": "123",
-                        "type": "text/csv",
-                    }
-                ]
-            }
-        }
-        alias_generator = to_camel
+    def to_llm_content(self) -> Optional[str]:
+        file_contents = [self._get_file_content(file) for file in self.files]
+        if len(file_contents) == 0:
+            return None
+        return "Use data from following files content\n" + "\n".join(file_contents)
 
 
 class AgentAnnotation(BaseModel):
@@ -99,12 +88,7 @@ class Annotation(BaseModel):
 
     def to_content(self) -> Optional[str]:
         if self.type == "document_file" and isinstance(self.data, AnnotationFileData):
-            # iterate through all files and construct content for LLM
-            file_contents = [file.metadata.to_llm_content() for file in self.data.files]
-            if len(file_contents) > 0:
-                return "Use data from following files content\n" + "\n".join(
-                    file_contents
-                )
+            return self.data.to_llm_content()
         elif self.type == "image":
             raise NotImplementedError("Use image file is not supported yet!")
         else:
@@ -247,14 +231,14 @@ class ChatData(BaseModel):
         Get the document IDs from the chat messages
         """
         document_ids: List[str] = []
-        uploaded_files = self.get_uploaded_files()
+        uploaded_files = self.get_document_files()
         for _file in uploaded_files:
-            refs = _file.metadata.refs
+            refs = getattr(_file, "refs", None)
             if refs is not None:
                 document_ids.extend(refs)
         return list(set(document_ids))
 
-    def get_uploaded_files(self) -> List[File]:
+    def get_document_files(self) -> List[DocumentFile]:
         """
         Get the uploaded files from the chat data
         """
diff --git a/templates/types/streaming/fastapi/app/api/routers/upload.py b/templates/types/streaming/fastapi/app/api/routers/upload.py
index 78aff33cd1df817adb37cc622d07ac635e06bd2b..2a4d35587d07425fc8a785ee49a5de23c81f7b5a 100644
--- a/templates/types/streaming/fastapi/app/api/routers/upload.py
+++ b/templates/types/streaming/fastapi/app/api/routers/upload.py
@@ -1,10 +1,11 @@
 import logging
-from typing import Any, Dict
+from typing import Any
 
 from fastapi import APIRouter, HTTPException
 from pydantic import BaseModel
 
-from app.api.services.file import PrivateFileService
+from app.api.routers.models import DocumentFile
+from app.services.file import FileService
 
 file_upload_router = r = APIRouter()
 
@@ -13,23 +14,20 @@ logger = logging.getLogger("uvicorn")
 
 class FileUploadRequest(BaseModel):
     base64: str
-    filename: str
+    name: str
     params: Any = None
 
 
 @r.post("")
-def upload_file(request: FileUploadRequest) -> Dict[str, Any]:
+def upload_file(request: FileUploadRequest) -> DocumentFile:
     """
     To upload a private file from the chat UI.
-    Returns:
-        The metadata of the uploaded file.
     """
     try:
-        logger.info(f"Processing file: {request.filename}")
-        file_meta = PrivateFileService.process_file(
-            request.filename, request.base64, request.params
+        logger.info(f"Processing file: {request.name}")
+        return FileService.process_private_file(
+            request.name, request.base64, request.params
         )
-        return file_meta.to_upload_response()
     except Exception as e:
         logger.error(f"Error processing file: {e}", exc_info=True)
         raise HTTPException(status_code=500, detail="Error processing file")
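A hedged client-side example of the renamed request body; the `/api/chat/upload` route prefix and port are assumptions based on the frontend templates:

```python
import base64

import requests

with open("report.pdf", "rb") as f:
    encoded = base64.b64encode(f.read()).decode()

resp = requests.post(
    "http://localhost:8000/api/chat/upload",
    json={
        "base64": f"data:application/pdf;base64,{encoded}",
        "name": "report.pdf",  # previously "filename"
    },
)
print(resp.json())  # a DocumentFile: id, name, type, size, url, refs
```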
diff --git a/templates/types/streaming/fastapi/app/engine/utils/file_helper.py b/templates/types/streaming/fastapi/app/engine/utils/file_helper.py
deleted file mode 100644
index 5270139eabab1900d991ee14c3498c1d115ed063..0000000000000000000000000000000000000000
--- a/templates/types/streaming/fastapi/app/engine/utils/file_helper.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import logging
-import os
-import uuid
-from typing import Any, Dict, List, Optional
-
-from pydantic import BaseModel, Field, computed_field
-
-logger = logging.getLogger(__name__)
-
-
-class FileMetadata(BaseModel):
-    path: str = Field(..., description="The stored path of the file")
-    name: str = Field(..., description="The name of the file")
-    url: str = Field(..., description="The URL of the file")
-    refs: Optional[List[str]] = Field(
-        None, description="The indexed document IDs that the file is referenced to"
-    )
-
-    @computed_field
-    def file_id(self) -> Optional[str]:
-        file_els = self.name.split("_", maxsplit=1)
-        if len(file_els) == 2:
-            return file_els[0]
-        return None
-
-    def to_upload_response(self) -> Dict[str, Any]:
-        response = {
-            "id": self.file_id,
-            "name": self.name,
-            "url": self.url,
-            "refs": self.refs,
-        }
-        return response
-
-
-def save_file(
-    content: bytes | str,
-    file_name: Optional[str] = None,
-    file_path: Optional[str] = None,
-) -> FileMetadata:
-    """
-    Save the content to a file in the local file server (accessible via URL)
-    Args:
-        content (bytes | str): The content to save, either bytes or string.
-        file_name (Optional[str]): The name of the file. If not provided, a random name will be generated with .txt extension.
-        file_path (Optional[str]): The path to save the file to. If not provided, a random name will be generated.
-    Returns:
-        The metadata of the saved file.
-    """
-    if file_name is not None and file_path is not None:
-        raise ValueError("Either file_name or file_path should be provided")
-
-    if file_path is None:
-        if file_name is None:
-            file_name = f"{uuid.uuid4()}.txt"
-        file_path = os.path.join(os.getcwd(), file_name)
-    else:
-        file_name = os.path.basename(file_path)
-
-    if isinstance(content, str):
-        content = content.encode()
-
-    try:
-        os.makedirs(os.path.dirname(file_path), exist_ok=True)
-        with open(file_path, "wb") as file:
-            file.write(content)
-    except PermissionError as e:
-        logger.error(f"Permission denied when writing to file {file_path}: {str(e)}")
-        raise
-    except IOError as e:
-        logger.error(f"IO error occurred when writing to file {file_path}: {str(e)}")
-        raise
-    except Exception as e:
-        logger.error(f"Unexpected error when writing to file {file_path}: {str(e)}")
-        raise
-
-    logger.info(f"Saved file to {file_path}")
-
-    return FileMetadata(
-        path=file_path if isinstance(file_path, str) else str(file_path),
-        name=file_name,
-        url=f"{os.getenv('FILESERVER_URL_PREFIX')}/{file_path}",
-    )
diff --git a/templates/types/streaming/fastapi/app/services/file.py b/templates/types/streaming/fastapi/app/services/file.py
new file mode 100644
index 0000000000000000000000000000000000000000..02e0084414aed4205293a14fae2b8a51499f1e6a
--- /dev/null
+++ b/templates/types/streaming/fastapi/app/services/file.py
@@ -0,0 +1,301 @@
+import base64
+import logging
+import mimetypes
+import os
+import re
+import uuid
+from io import BytesIO
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple
+
+from llama_index.core import VectorStoreIndex
+from llama_index.core.ingestion import IngestionPipeline
+from llama_index.core.readers.file.base import (
+    _try_loading_included_file_formats as get_file_loaders_map,
+)
+from llama_index.core.schema import Document
+from llama_index.core.tools.function_tool import FunctionTool
+from llama_index.indices.managed.llama_cloud.base import LlamaCloudIndex
+from llama_index.readers.file import FlatReader
+from pydantic import BaseModel, Field
+
+logger = logging.getLogger(__name__)
+
+PRIVATE_STORE_PATH = str(Path("output", "uploaded"))
+TOOL_STORE_PATH = str(Path("output", "tools"))
+LLAMA_CLOUD_STORE_PATH = str(Path("output", "llamacloud"))
+
+
+class DocumentFile(BaseModel):
+    id: str
+    name: str  # Stored file name
+    type: Optional[str] = None
+    size: Optional[int] = None
+    url: Optional[str] = None
+    path: Optional[str] = Field(
+        None,
+        description="The stored file path. Used internally in the server.",
+        exclude=True,
+    )
+    refs: Optional[List[str]] = Field(
+        None, description="The document ids in the index."
+    )
+
+
+class FileService:
+    """
+    To store the files uploaded by the user and add them to the index.
+    """
+
+    @classmethod
+    def process_private_file(
+        cls,
+        file_name: str,
+        base64_content: str,
+        params: Optional[dict] = None,
+    ) -> DocumentFile:
+        """
+        Store the uploaded file and index it if necessary.
+        """
+        try:
+            from app.engine.index import IndexConfig, get_index
+        except ImportError as e:
+            raise ValueError("IndexConfig or get_index is not found") from e
+
+        if params is None:
+            params = {}
+
+        # Load the index based on the request params
+        index_config = IndexConfig(**params)
+        index = get_index(index_config)
+
+        # Preprocess and store the file
+        file_data, extension = cls._preprocess_base64_file(base64_content)
+
+        document_file = cls.save_file(
+            file_data,
+            file_name=file_name,
+            save_dir=PRIVATE_STORE_PATH,
+        )
+
+        tools = _get_available_tools()
+        code_executor_tools = ["interpreter", "artifact"]
+        # If the file is CSV and there is a code executor tool, we don't need to index.
+        if extension == "csv" and any(tool in tools for tool in code_executor_tools):
+            return document_file
+        else:
+            # Insert the file into the index and update document ids to the file metadata
+            if isinstance(index, LlamaCloudIndex):
+                doc_id = cls._add_file_to_llama_cloud_index(
+                    index, document_file.name, file_data
+                )
+                # Add document ids to the file metadata
+                document_file.refs = [doc_id]
+            else:
+                documents = cls._load_file_to_documents(document_file)
+                cls._add_documents_to_vector_store_index(documents, index)
+                # Add document ids to the file metadata
+                document_file.refs = [doc.doc_id for doc in documents]
+
+        # Return the file metadata
+        return document_file
+
+    @classmethod
+    def save_file(
+        cls,
+        content: bytes | str,
+        file_name: str,
+        save_dir: Optional[str] = None,
+    ) -> DocumentFile:
+        """
+        Save the content to a file in the local file server (accessible via URL)
+
+        Args:
+            content (bytes | str): The content to save, either bytes or string.
+            file_name (str): The original name of the file.
+            save_dir (Optional[str]): The relative path from the current working directory. Defaults to the `output/uploaded` directory.
+        Returns:
+            The metadata of the saved file.
+        """
+        if save_dir is None:
+            save_dir = os.path.join("output", "uploaded")
+
+        file_id = str(uuid.uuid4())
+        name, extension = os.path.splitext(file_name)
+        extension = extension.lstrip(".")
+        sanitized_name = _sanitize_file_name(name)
+        if extension == "":
+            raise ValueError("File is not supported!")
+        new_file_name = f"{sanitized_name}_{file_id}.{extension}"
+
+        file_path = os.path.join(save_dir, new_file_name)
+
+        if isinstance(content, str):
+            content = content.encode()
+
+        try:
+            os.makedirs(os.path.dirname(file_path), exist_ok=True)
+            with open(file_path, "wb") as file:
+                file.write(content)
+        except PermissionError as e:
+            logger.error(
+                f"Permission denied when writing to file {file_path}: {str(e)}"
+            )
+            raise
+        except IOError as e:
+            logger.error(
+                f"IO error occurred when writing to file {file_path}: {str(e)}"
+            )
+            raise
+        except Exception as e:
+            logger.error(f"Unexpected error when writing to file {file_path}: {str(e)}")
+            raise
+
+        logger.info(f"Saved file to {file_path}")
+
+        file_url_prefix = os.getenv("FILESERVER_URL_PREFIX")
+        if file_url_prefix is None:
+            logger.warning(
+                "FILESERVER_URL_PREFIX is not set, fallback to http://localhost:8000/api/files"
+            )
+            file_url_prefix = "http://localhost:8000/api/files"
+        file_size = os.path.getsize(file_path)
+
+        # Join with forward slashes so the URL is also correct on Windows
+        file_url = "/".join(
+            [file_url_prefix, save_dir.replace(os.sep, "/"), new_file_name]
+        )
+
+        return DocumentFile(
+            id=file_id,
+            name=new_file_name,
+            type=extension,
+            size=file_size,
+            path=file_path,
+            url=file_url,
+            refs=None,
+        )
+
+    @staticmethod
+    def _preprocess_base64_file(base64_content: str) -> Tuple[bytes, str]:
+        header, data = base64_content.split(",", 1)
+        mime_type = header.split(";")[0].split(":", 1)[1]
+        extension = mimetypes.guess_extension(mime_type)
+        if extension is None:
+            raise ValueError(f"Unsupported file type: {mime_type}")
+        # Return the decoded file data as bytes along with its extension
+        return base64.b64decode(data), extension.lstrip(".")
+
+    @staticmethod
+    def _load_file_to_documents(file: DocumentFile) -> List[Document]:
+        """
+        Load the file from the private directory and return the documents
+        """
+        _, extension = os.path.splitext(file.name)
+        extension = extension.lstrip(".")
+
+        # Load file to documents
+        # If LlamaParse is enabled, use it to parse the file
+        # Otherwise, use the default file loaders
+        reader = _get_llamaparse_parser()
+        if reader is None:
+            reader_cls = _default_file_loaders_map().get(f".{extension}")
+            if reader_cls is None:
+                raise ValueError(f"File extension {extension} is not supported")
+            reader = reader_cls()
+        if file.path is None:
+            raise ValueError("Document file path is not set")
+        documents = reader.load_data(Path(file.path))
+        # Add custom metadata
+        for doc in documents:
+            doc.metadata["file_name"] = file.name
+            doc.metadata["private"] = "true"
+        return documents
+
+    @staticmethod
+    def _add_documents_to_vector_store_index(
+        documents: List[Document], index: VectorStoreIndex
+    ) -> None:
+        """
+        Add the documents to the vector store index
+        """
+        pipeline = IngestionPipeline()
+        nodes = pipeline.run(documents=documents)
+
+        # Add the nodes to the index and persist it
+        if index is None:
+            index = VectorStoreIndex(nodes=nodes)
+        else:
+            index.insert_nodes(nodes=nodes)
+        index.storage_context.persist(
+            persist_dir=os.environ.get("STORAGE_DIR", "storage")
+        )
+
+    @staticmethod
+    def _add_file_to_llama_cloud_index(
+        index: LlamaCloudIndex,
+        file_name: str,
+        file_data: bytes,
+    ) -> str:
+        """
+        Add the file to the LlamaCloud index.
+        LlamaCloudIndex is a managed index so we can directly use the files.
+        """
+        try:
+            from app.engine.service import LLamaCloudFileService
+        except ImportError as e:
+            raise ValueError("LlamaCloudFileService is not found") from e
+
+        project_id = index._get_project_id()
+        pipeline_id = index._get_pipeline_id()
+        # LlamaCloudIndex is a managed index so we can directly use the files
+        upload_file = (file_name, BytesIO(file_data))
+        doc_id = LLamaCloudFileService.add_file_to_pipeline(
+            project_id,
+            pipeline_id,
+            upload_file,
+            custom_metadata={},
+        )
+        return doc_id
+
+
+def _sanitize_file_name(file_name: str) -> str:
+    """
+    Sanitize the file name by replacing all characters that are not alphanumeric or a dot with underscores
+    """
+    sanitized_name = re.sub(r"[^a-zA-Z0-9.]", "_", file_name)
+    return sanitized_name
+
+
+def _get_llamaparse_parser():
+    from app.engine.loaders import load_configs
+    from app.engine.loaders.file import FileLoaderConfig, llama_parse_parser
+
+    config = load_configs()
+    file_loader_config = FileLoaderConfig(**config["file"])
+    if file_loader_config.use_llama_parse:
+        return llama_parse_parser()
+    else:
+        return None
+
+
+def _default_file_loaders_map():
+    default_loaders = get_file_loaders_map()
+    default_loaders[".txt"] = FlatReader
+    default_loaders[".csv"] = FlatReader
+    return default_loaders
+
+
+def _get_available_tools() -> Dict[str, List[FunctionTool]]:
+    try:
+        from app.engine.tools import ToolFactory
+    except ImportError:
+        logger.warning("ToolFactory not found, no tools will be available")
+        return {}
+
+    try:
+        tools = ToolFactory.from_env(map_result=True)
+        return tools  # type: ignore
+    except Exception as e:
+        logger.error(f"Error loading tools from environment: {str(e)}")
+        raise ValueError(f"Failed to get available tools: {str(e)}") from e
diff --git a/templates/types/streaming/nextjs/app/api/chat/upload/route.ts b/templates/types/streaming/nextjs/app/api/chat/upload/route.ts
index 382a94c937fd779e347d49b9ba8a59515c12487f..05939e38c31ead889f474d0ed1b90ba5f9b4e7dc 100644
--- a/templates/types/streaming/nextjs/app/api/chat/upload/route.ts
+++ b/templates/types/streaming/nextjs/app/api/chat/upload/route.ts
@@ -11,19 +11,23 @@ export const dynamic = "force-dynamic";
 export async function POST(request: NextRequest) {
   try {
     const {
-      filename,
+      name,
       base64,
       params,
-    }: { filename: string; base64: string; params?: any } =
-      await request.json();
-    if (!base64 || !filename) {
+    }: {
+      name: string;
+      base64: string;
+      params?: any;
+    } = await request.json();
+    if (!base64 || !name) {
       return NextResponse.json(
-        { error: "base64 and filename is required in the request body" },
+        { error: "base64 and name is required in the request body" },
         { status: 400 },
       );
     }
     const index = await getDataSource(params);
-    return NextResponse.json(await uploadDocument(index, filename, base64));
+    const documentFile = await uploadDocument(index, name, base64);
+    return NextResponse.json(documentFile);
   } catch (error) {
     console.error("[Upload API]", error);
     return NextResponse.json(
diff --git a/templates/types/streaming/nextjs/app/components/ui/chat/chat-input.tsx b/templates/types/streaming/nextjs/app/components/ui/chat/chat-input.tsx
index ce2b02d063f14fc6064b6fbe94c321409dad3f84..33b1b92d90ae83d27df195b8dc821edff14f14e2 100644
--- a/templates/types/streaming/nextjs/app/components/ui/chat/chat-input.tsx
+++ b/templates/types/streaming/nextjs/app/components/ui/chat/chat-input.tsx
@@ -1,5 +1,6 @@
 import { JSONValue } from "ai";
 import React from "react";
+import { DocumentFile } from ".";
 import { Button } from "../button";
 import { DocumentPreview } from "../document-preview";
 import FileUploader from "../file-uploader";
@@ -65,8 +66,8 @@ export default function ChatInput(
   };
 
   const handleUploadFile = async (file: File) => {
-    if (imageUrl || files.length > 0) {
-      alert("You can only upload one file at a time.");
+    if (imageUrl) {
+      alert("You can only upload one image at a time.");
       return;
     }
     try {
@@ -95,9 +96,9 @@ export default function ChatInput(
       )}
       {files.length > 0 && (
         <div className="flex gap-4 w-full overflow-auto py-2">
-          {files.map((file, index) => (
+          {files.map((file: DocumentFile) => (
             <DocumentPreview
-              key={file.metadata?.id ?? `${file.filename}-${index}`}
+              key={file.id}
               file={file}
               onRemove={() => removeDoc(file)}
             />
diff --git a/templates/types/streaming/nextjs/app/components/ui/chat/chat-message/chat-files.tsx b/templates/types/streaming/nextjs/app/components/ui/chat/chat-message/chat-files.tsx
index 085963d2b940db8369293b97dfb737e11bce32c6..0a5859a96d36efbf9cf39fa1a47d686d4e880f4b 100644
--- a/templates/types/streaming/nextjs/app/components/ui/chat/chat-message/chat-files.tsx
+++ b/templates/types/streaming/nextjs/app/components/ui/chat/chat-message/chat-files.tsx
@@ -6,10 +6,7 @@ export function ChatFiles({ data }: { data: DocumentFileData }) {
   return (
     <div className="flex gap-2 items-center">
       {data.files.map((file, index) => (
-        <DocumentPreview
-          key={file.metadata?.id ?? `${file.filename}-${index}`}
-          file={file}
-        />
+        <DocumentPreview key={file.id} file={file} />
       ))}
     </div>
   );
diff --git a/templates/types/streaming/nextjs/app/components/ui/chat/chat-message/chat-sources.tsx b/templates/types/streaming/nextjs/app/components/ui/chat/chat-message/chat-sources.tsx
index 03a1a62783ace6dd33e19293f52c31f7a85e5ba2..c0da0031ace713d18a1701e3aac11b6b0ff35c98 100644
--- a/templates/types/streaming/nextjs/app/components/ui/chat/chat-message/chat-sources.tsx
+++ b/templates/types/streaming/nextjs/app/components/ui/chat/chat-message/chat-sources.tsx
@@ -104,8 +104,8 @@ export function DocumentInfo({
   const fileExt = fileName?.split(".").pop() as DocumentFileType | undefined;
 
   const previewFile = {
-    filename: fileName,
-    filetype: fileExt,
+    name: fileName,
+    type: fileExt ?? "txt",
   };
 
   const DocumentDetail = (
diff --git a/templates/types/streaming/nextjs/app/components/ui/chat/hooks/use-file.ts b/templates/types/streaming/nextjs/app/components/ui/chat/hooks/use-file.ts
index 695202f4ff146c886813e8b9e38c3ceb0e4bfde5..049db6ba708f720167fb644e07f46a39173cfecc 100644
--- a/templates/types/streaming/nextjs/app/components/ui/chat/hooks/use-file.ts
+++ b/templates/types/streaming/nextjs/app/components/ui/chat/hooks/use-file.ts
@@ -7,7 +7,6 @@ import {
   DocumentFileType,
   MessageAnnotation,
   MessageAnnotationType,
-  UploadedFileMeta,
 } from "..";
 import { useClientConfig } from "./use-config";
 
@@ -24,14 +23,8 @@ export function useFile() {
   const [imageUrl, setImageUrl] = useState<string | null>(null);
   const [files, setFiles] = useState<DocumentFile[]>([]);
 
-  const docEqual = (a: DocumentFile, b: DocumentFile) => {
-    if (a.metadata?.id === b.metadata?.id) return true;
-    if (a.filename === b.filename && a.filesize === b.filesize) return true;
-    return false;
-  };
-
   const addDoc = (file: DocumentFile) => {
-    const existedFile = files.find((f) => docEqual(f, file));
+    const existedFile = files.find((f) => f.id === file.id);
     if (!existedFile) {
       setFiles((prev) => [...prev, file]);
       return true;
@@ -40,9 +33,7 @@ export function useFile() {
   };
 
   const removeDoc = (file: DocumentFile) => {
-    setFiles((prev) =>
-      prev.filter((f) => f.metadata?.id !== file.metadata?.id),
-    );
+    setFiles((prev) => prev.filter((f) => f.id !== file.id));
   };
 
   const reset = () => {
@@ -53,7 +44,7 @@ export function useFile() {
   const uploadContent = async (
     file: File,
     requestParams: any = {},
-  ): Promise<UploadedFileMeta> => {
+  ): Promise<DocumentFile> => {
     const base64 = await readContent({ file, asUrl: true });
     const uploadAPI = `${backend}/api/chat/upload`;
     const response = await fetch(uploadAPI, {
@@ -64,11 +55,11 @@ export function useFile() {
       body: JSON.stringify({
         ...requestParams,
         base64,
-        filename: file.name,
+        name: file.name,
       }),
     });
     if (!response.ok) throw new Error("Failed to upload document.");
-    return (await response.json()) as UploadedFileMeta;
+    return (await response.json()) as DocumentFile;
   };
 
   const getAnnotations = () => {
@@ -114,13 +105,7 @@ export function useFile() {
 
     const filetype = docMineTypeMap[file.type];
     if (!filetype) throw new Error("Unsupported document type.");
-    const uploadedFileMeta = await uploadContent(file, requestParams);
-    const newDoc: DocumentFile = {
-      filename: file.name,
-      filesize: file.size,
-      filetype,
-      metadata: uploadedFileMeta,
-    };
+    const newDoc = await uploadContent(file, requestParams);
     return addDoc(newDoc);
   };
 
diff --git a/templates/types/streaming/nextjs/app/components/ui/chat/index.ts b/templates/types/streaming/nextjs/app/components/ui/chat/index.ts
index eedd4ceb25763e8d2b2da43f31454e52700f47fe..b88dfd423b52f293ecbb6c119717f7f781000173 100644
--- a/templates/types/streaming/nextjs/app/components/ui/chat/index.ts
+++ b/templates/types/streaming/nextjs/app/components/ui/chat/index.ts
@@ -27,18 +27,13 @@ export const DOCUMENT_FILE_TYPES: DocumentFileType[] = [
   "docx",
 ];
 
-export type UploadedFileMeta = {
-  id: string;
-  name: string; // The uploaded file name in the backend (including uuid and sanitized)
-  url?: string;
-  refs?: string[];
-};
-
 export type DocumentFile = {
-  filename: string; // The original file name
-  filesize: number;
-  filetype: DocumentFileType;
-  metadata?: UploadedFileMeta; // undefined when the file is not uploaded yet
+  id: string;
+  name: string; // The uploaded file name in the backend
+  size: number; // The file size in bytes
+  type: DocumentFileType;
+  url: string; // The URL of the uploaded file in the backend
+  refs?: string[]; // DocumentIDs of the uploaded file in the vector index
 };
 
 export type DocumentFileData = {
diff --git a/templates/types/streaming/nextjs/app/components/ui/document-preview.tsx b/templates/types/streaming/nextjs/app/components/ui/document-preview.tsx
index b0f9bd900dde1ec0319476250aae2afdc92c9272..ee3059a05e03864a04a45074c3e951598ae9dd36 100644
--- a/templates/types/streaming/nextjs/app/components/ui/document-preview.tsx
+++ b/templates/types/streaming/nextjs/app/components/ui/document-preview.tsx
@@ -23,11 +23,11 @@ export interface DocumentPreviewProps {
 }
 
 export function DocumentPreview(props: DocumentPreviewProps) {
-  const { filename, filesize, filetype, metadata } = props.file;
+  const { name, size, type, refs } = props.file;
 
-  if (metadata?.refs?.length) {
+  if (refs?.length) {
     return (
-      <div title={`Document IDs: ${metadata.refs.join(", ")}`}>
+      <div title={`Document IDs: ${refs.join(", ")}`}>
         <PreviewCard {...props} />
       </div>
     );
@@ -43,9 +43,9 @@ export function DocumentPreview(props: DocumentPreviewProps) {
       <DrawerContent className="w-3/5 mt-24 h-full max-h-[96%] ">
         <DrawerHeader className="flex justify-between">
           <div className="space-y-2">
-            <DrawerTitle>{filetype.toUpperCase()} Raw Content</DrawerTitle>
+            <DrawerTitle>{type.toUpperCase()} Raw Content</DrawerTitle>
             <DrawerDescription>
-              {filename} ({inKB(filesize)} KB)
+              {name} ({inKB(size)} KB)
             </DrawerDescription>
           </div>
           <DrawerClose asChild>
@@ -53,9 +53,9 @@ export function DocumentPreview(props: DocumentPreviewProps) {
           </DrawerClose>
         </DrawerHeader>
         <div className="m-4 max-h-[80%] overflow-auto">
-          {metadata?.refs?.length && (
+          {refs?.length && (
             <pre className="bg-secondary rounded-md p-4 block text-sm">
-              {metadata.refs.join(", ")}
+              {refs.join(", ")}
             </pre>
           )}
         </div>
@@ -73,9 +73,9 @@ export const FileIcon: Record<DocumentFileType, string> = {
 
 export function PreviewCard(props: {
   file: {
-    filename: string;
-    filesize?: number;
-    filetype?: DocumentFileType;
+    name: string;
+    size?: number;
+    type: DocumentFileType;
   };
   onRemove?: () => void;
   className?: string;
@@ -93,17 +93,17 @@ export function PreviewCard(props: {
           <Image
             className="h-full w-auto object-contain"
             priority
-            src={FileIcon[file.filetype || "txt"]}
+            src={FileIcon[file.type]}
             alt="Icon"
           />
         </div>
         <div className="overflow-hidden">
           <div className="truncate font-semibold">
-            {file.filename} {file.filesize ? `(${inKB(file.filesize)} KB)` : ""}
+            {file.name} {file.size ? `(${inKB(file.size)} KB)` : ""}
           </div>
-          {file.filetype && (
+          {file.type && (
             <div className="truncate text-token-text-tertiary flex items-center gap-2">
-              <span>{file.filetype.toUpperCase()} File</span>
+              <span>{file.type.toUpperCase()} File</span>
             </div>
           )}
         </div>