diff --git a/templates/types/streaming/express/src/controllers/chat.controller.ts b/templates/types/streaming/express/src/controllers/chat.controller.ts
index beb8d7f173a32b37e71e7bdbfc8217d35458b973..f88397919c8c0ae55a5b610fcca53f41665070f3 100644
--- a/templates/types/streaming/express/src/controllers/chat.controller.ts
+++ b/templates/types/streaming/express/src/controllers/chat.controller.ts
@@ -2,11 +2,7 @@ import { Message, StreamData, streamToResponse } from "ai";
 import { Request, Response } from "express";
 import { ChatMessage, Settings } from "llamaindex";
 import { createChatEngine } from "./engine/chat";
-import {
-  DataParserOptions,
-  LlamaIndexStream,
-  convertMessageContent,
-} from "./llamaindex-stream";
+import { LlamaIndexStream, convertMessageContent } from "./llamaindex-stream";
 import { createCallbackManager, createStreamTimeout } from "./stream-helper";
 
 export const chat = async (req: Request, res: Response) => {
@@ -14,10 +10,7 @@ export const chat = async (req: Request, res: Response) => {
   const vercelStreamData = new StreamData();
   const streamTimeout = createStreamTimeout(vercelStreamData);
   try {
-    const {
-      messages,
-      data,
-    }: { messages: Message[]; data: DataParserOptions | undefined } = req.body;
+    const { messages }: { messages: Message[] } = req.body;
     const userMessage = messages.pop();
     if (!messages || !userMessage || userMessage.role !== "user") {
       return res.status(400).json({
@@ -28,8 +21,24 @@ export const chat = async (req: Request, res: Response) => {
 
     const chatEngine = await createChatEngine();
 
+    let annotations = userMessage.annotations;
+    if (!annotations) {
+      // the user didn't send any new annotations with the last message
+      // so use the annotations from the last user message that has annotations
+      // REASON: GPT4 doesn't consider MessageContentDetail from previous messages, only strings
+      annotations = messages
+        .slice()
+        .reverse()
+        .find(
+          (message) => message.role === "user" && message.annotations,
+        )?.annotations;
+    }
+
     // Convert message content from Vercel/AI format to LlamaIndex/OpenAI format
-    const userMessageContent = convertMessageContent(userMessage.content, data);
+    const userMessageContent = convertMessageContent(
+      userMessage.content,
+      annotations,
+    );
 
     // Setup callbacks
     const callbackManager = createCallbackManager(vercelStreamData);
@@ -44,12 +53,7 @@ export const chat = async (req: Request, res: Response) => {
     });
 
     // Return a stream, which can be consumed by the Vercel/AI client
-    const stream = LlamaIndexStream(response, vercelStreamData, {
-      parserOptions: {
-        imageUrl: data?.imageUrl,
-        csvFiles: data?.csvFiles,
-      },
-    });
+    const stream = LlamaIndexStream(response, vercelStreamData);
 
     return streamToResponse(stream, res, {}, vercelStreamData);
   } catch (error) {
diff --git a/templates/types/streaming/express/src/controllers/llamaindex-stream.ts b/templates/types/streaming/express/src/controllers/llamaindex-stream.ts
index a8c055a4538409ba8400fe4c29a765ac4f6fe470..f2f4b5a33f60aedb4fb8598e3e2757328280af7c 100644
--- a/templates/types/streaming/express/src/controllers/llamaindex-stream.ts
+++ b/templates/types/streaming/express/src/controllers/llamaindex-stream.ts
@@ -1,4 +1,5 @@
 import {
+  JSONValue,
   StreamData,
   createCallbacksTransformer,
   createStreamDataTransformer,
@@ -7,6 +8,7 @@ import {
 } from "ai";
 import {
   MessageContent,
+  MessageContentDetail,
   Metadata,
   NodeWithScore,
   Response,
@@ -20,42 +22,66 @@ type LlamaIndexResponse =
   | AgentStreamChatResponse<ToolCallLLMMessageOptions>
   | Response;
 
-export type DataParserOptions = {
-  imageUrl?: string;
-  csvFiles?: CsvFile[];
-};
-
 export const convertMessageContent = (
-  textMessage: string,
-  additionalData?: DataParserOptions,
+  content: string,
+  annotations?: JSONValue[],
 ): MessageContent => {
-  if (!additionalData) return textMessage;
-  const content: MessageContent = [
+  if (!annotations) return content;
+  return [
     {
       type: "text",
-      text: textMessage,
+      text: content,
     },
+    ...convertAnnotations(annotations),
   ];
-  if (additionalData?.imageUrl) {
-    content.push({
-      type: "image_url",
-      image_url: {
-        url: additionalData?.imageUrl,
-      },
-    });
-  }
+};
 
-  if (additionalData?.csvFiles?.length) {
-    const rawContents = additionalData.csvFiles.map((csv) => {
-      return "```csv\n" + csv.content + "\n```";
-    });
-    const csvContent =
-      "Use data from following CSV raw contents:\n" + rawContents.join("\n\n");
-    content.push({
-      type: "text",
-      text: `${csvContent}\n\n${textMessage}`,
-    });
-  }
+const convertAnnotations = (
+  annotations: JSONValue[],
+): MessageContentDetail[] => {
+  const content: MessageContentDetail[] = [];
+  annotations.forEach((annotation: JSONValue) => {
+    // first skip invalid annotation
+    if (
+      !(
+        annotation &&
+        typeof annotation === "object" &&
+        "type" in annotation &&
+        "data" in annotation &&
+        annotation.data &&
+        typeof annotation.data === "object"
+      )
+    ) {
+      console.log(
+        "Client sent invalid annotation. Missing data and type",
+        annotation,
+      );
+      return;
+    }
+    const { type, data } = annotation;
+    // convert image
+    if (type === "image" && "url" in data && typeof data.url === "string") {
+      content.push({
+        type: "image_url",
+        image_url: {
+          url: data.url,
+        },
+      });
+    }
+    // convert CSV files to text
+    if (type === "csv" && "csvFiles" in data && Array.isArray(data.csvFiles)) {
+      const rawContents = data.csvFiles.map((csv) => {
+        return "```csv\n" + (csv as CsvFile).content + "\n```";
+      });
+      const csvContent =
+        "Use data from following CSV raw contents:\n" +
+        rawContents.join("\n\n");
+      content.push({
+        type: "text",
+        text: csvContent,
+      });
+    }
+  });
 
   return content;
 };
@@ -63,7 +89,6 @@ export const convertMessageContent = (
 function createParser(
   res: AsyncIterable<LlamaIndexResponse>,
   data: StreamData,
-  opts?: DataParserOptions,
 ) {
   const it = res[Symbol.asyncIterator]();
   const trimStartOfStream = trimStartOfStreamHelper();
@@ -106,10 +131,9 @@ export function LlamaIndexStream(
   data: StreamData,
   opts?: {
     callbacks?: AIStreamCallbacksAndOptions;
-    parserOptions?: DataParserOptions;
   },
 ): ReadableStream<Uint8Array> {
-  return createParser(response, data, opts?.parserOptions)
+  return createParser(response, data)
     .pipeThrough(createCallbacksTransformer(opts?.callbacks))
     .pipeThrough(createStreamDataTransformer());
 }
diff --git a/templates/types/streaming/fastapi/app/api/routers/models.py b/templates/types/streaming/fastapi/app/api/routers/models.py
index 5b1ebca5bb6b3f8b040c47237b2e49b002ddb359..d870b091453b517181a27bfa552492f92a2fda70 100644
--- a/templates/types/streaming/fastapi/app/api/routers/models.py
+++ b/templates/types/streaming/fastapi/app/api/routers/models.py
@@ -10,11 +10,6 @@ from llama_index.core.llms import ChatMessage, MessageRole
 logger = logging.getLogger("uvicorn")
 
 
-class Message(BaseModel):
-    role: MessageRole
-    content: str
-
-
 class CsvFile(BaseModel):
     content: str
     filename: str
@@ -23,7 +18,7 @@ class CsvFile(BaseModel):
     type: str
 
 
-class DataParserOptions(BaseModel):
+class AnnotationData(BaseModel):
     csv_files: List[CsvFile] | None = Field(
         default=None,
         description="List of CSV files",
@@ -47,15 +42,23 @@ class DataParserOptions(BaseModel):
 
     def to_raw_content(self) -> str:
         if self.csv_files is not None and len(self.csv_files) > 0:
-            return "Use data from following CSV raw contents" + "\n".join(
+            return "Use data from following CSV raw contents\n" + "\n".join(
                 [f"```csv\n{csv_file.content}\n```" for csv_file in self.csv_files]
             )
 
 
+class Annotation(BaseModel):
+    type: str
+    data: AnnotationData
+
+
+class Message(BaseModel):
+    role: MessageRole
+    content: str
+    annotations: List[Annotation] | None = None
+
+
 class ChatData(BaseModel):
-    data: DataParserOptions | None = Field(
-        default=None,
-    )
     messages: List[Message]
 
     class Config:
@@ -78,11 +81,22 @@ class ChatData(BaseModel):
 
     def get_last_message_content(self) -> str:
         """
-        Get the content of the last message along with the data content if available
+        Get the content of the last message along with the data content if available. Falls back to the data content from previous user messages.
         """
-        message_content = self.messages[-1].content
-        if self.data:
-            message_content += "\n" + self.data.to_raw_content()
+        if len(self.messages) == 0:
+            raise ValueError("There are no messages in the chat")
+        last_message = self.messages[-1]
+        message_content = last_message.content
+        for message in reversed(self.messages):
+            if message.role == MessageRole.USER and message.annotations is not None:
+                annotation_text = "\n".join(
+                    [
+                        annotation.data.to_raw_content() or ""
+                        for annotation in message.annotations
+                    ]
+                )
+                message_content = f"{message_content}\n{annotation_text}"
+                break
         return message_content
 
     def get_history_messages(self) -> List[Message]:
diff --git a/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts b/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts
index a8c055a4538409ba8400fe4c29a765ac4f6fe470..f2f4b5a33f60aedb4fb8598e3e2757328280af7c 100644
--- a/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts
+++ b/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts
@@ -1,4 +1,5 @@
 import {
+  JSONValue,
   StreamData,
   createCallbacksTransformer,
   createStreamDataTransformer,
@@ -7,6 +8,7 @@ import {
 } from "ai";
 import {
   MessageContent,
+  MessageContentDetail,
   Metadata,
   NodeWithScore,
   Response,
@@ -20,42 +22,66 @@ type LlamaIndexResponse =
   | AgentStreamChatResponse<ToolCallLLMMessageOptions>
   | Response;
 
-export type DataParserOptions = {
-  imageUrl?: string;
-  csvFiles?: CsvFile[];
-};
-
 export const convertMessageContent = (
-  textMessage: string,
-  additionalData?: DataParserOptions,
+  content: string,
+  annotations?: JSONValue[],
 ): MessageContent => {
-  if (!additionalData) return textMessage;
-  const content: MessageContent = [
+  if (!annotations) return content;
+  return [
     {
       type: "text",
-      text: textMessage,
+      text: content,
     },
+    ...convertAnnotations(annotations),
   ];
-  if (additionalData?.imageUrl) {
-    content.push({
-      type: "image_url",
-      image_url: {
-        url: additionalData?.imageUrl,
-      },
-    });
-  }
+};
 
-  if (additionalData?.csvFiles?.length) {
-    const rawContents = additionalData.csvFiles.map((csv) => {
-      return "```csv\n" + csv.content + "\n```";
-    });
-    const csvContent =
-      "Use data from following CSV raw contents:\n" + rawContents.join("\n\n");
-    content.push({
-      type: "text",
-      text: `${csvContent}\n\n${textMessage}`,
-    });
-  }
+const convertAnnotations = (
+  annotations: JSONValue[],
+): MessageContentDetail[] => {
+  const content: MessageContentDetail[] = [];
+  annotations.forEach((annotation: JSONValue) => {
+    // first skip invalid annotation
+    if (
+      !(
+        annotation &&
+        typeof annotation === "object" &&
+        "type" in annotation &&
+        "data" in annotation &&
+        annotation.data &&
+        typeof annotation.data === "object"
+      )
+    ) {
+      console.log(
+        "Client sent invalid annotation. Missing data and type",
+        annotation,
+      );
+      return;
+    }
+    const { type, data } = annotation;
+    // convert image
+    if (type === "image" && "url" in data && typeof data.url === "string") {
+      content.push({
+        type: "image_url",
+        image_url: {
+          url: data.url,
+        },
+      });
+    }
+    // convert CSV files to text
+    if (type === "csv" && "csvFiles" in data && Array.isArray(data.csvFiles)) {
+      const rawContents = data.csvFiles.map((csv) => {
+        return "```csv\n" + (csv as CsvFile).content + "\n```";
+      });
+      const csvContent =
+        "Use data from following CSV raw contents:\n" +
+        rawContents.join("\n\n");
+      content.push({
+        type: "text",
+        text: csvContent,
+      });
+    }
+  });
 
   return content;
 };
@@ -63,7 +89,6 @@ export const convertMessageContent = (
 function createParser(
   res: AsyncIterable<LlamaIndexResponse>,
   data: StreamData,
-  opts?: DataParserOptions,
 ) {
   const it = res[Symbol.asyncIterator]();
   const trimStartOfStream = trimStartOfStreamHelper();
@@ -106,10 +131,9 @@ export function LlamaIndexStream(
   data: StreamData,
   opts?: {
     callbacks?: AIStreamCallbacksAndOptions;
-    parserOptions?: DataParserOptions;
   },
 ): ReadableStream<Uint8Array> {
-  return createParser(response, data, opts?.parserOptions)
+  return createParser(response, data)
     .pipeThrough(createCallbacksTransformer(opts?.callbacks))
     .pipeThrough(createStreamDataTransformer());
 }
diff --git a/templates/types/streaming/nextjs/app/api/chat/route.ts b/templates/types/streaming/nextjs/app/api/chat/route.ts
index 223917bc251b2cf58c1dacec888ad192f4f18fa8..d18b3fdb6bcf03e8db512cf998500620a7e9861e 100644
--- a/templates/types/streaming/nextjs/app/api/chat/route.ts
+++ b/templates/types/streaming/nextjs/app/api/chat/route.ts
@@ -4,11 +4,7 @@ import { ChatMessage, Settings } from "llamaindex";
 import { NextRequest, NextResponse } from "next/server";
 import { createChatEngine } from "./engine/chat";
 import { initSettings } from "./engine/settings";
-import {
-  DataParserOptions,
-  LlamaIndexStream,
-  convertMessageContent,
-} from "./llamaindex-stream";
+import { LlamaIndexStream, convertMessageContent } from "./llamaindex-stream";
 import { createCallbackManager, createStreamTimeout } from "./stream-helper";
 
 initObservability();
@@ -24,10 +20,7 @@ export async function POST(request: NextRequest) {
 
   try {
     const body = await request.json();
-    const {
-      messages,
-      data,
-    }: { messages: Message[]; data: DataParserOptions | undefined } = body;
+    const { messages }: { messages: Message[] } = body;
     const userMessage = messages.pop();
     if (!messages || !userMessage || userMessage.role !== "user") {
       return NextResponse.json(
@@ -41,8 +34,24 @@ export async function POST(request: NextRequest) {
 
     const chatEngine = await createChatEngine();
 
+    let annotations = userMessage.annotations;
+    if (!annotations) {
+      // the user didn't send any new annotations with the last message
+      // so use the annotations from the last user message that has annotations
+      // REASON: GPT4 doesn't consider MessageContentDetail from previous messages, only strings
+      annotations = messages
+        .slice()
+        .reverse()
+        .find(
+          (message) => message.role === "user" && message.annotations,
+        )?.annotations;
+    }
+
     // Convert message content from Vercel/AI format to LlamaIndex/OpenAI format
-    const userMessageContent = convertMessageContent(userMessage.content, data);
+    const userMessageContent = convertMessageContent(
+      userMessage.content,
+      annotations,
+    );
 
     // Setup callbacks
     const callbackManager = createCallbackManager(vercelStreamData);
@@ -57,12 +66,7 @@ export async function POST(request: NextRequest) {
     });
 
     // Transform LlamaIndex stream to Vercel/AI format
-    const stream = LlamaIndexStream(response, vercelStreamData, {
-      parserOptions: {
-        imageUrl: data?.imageUrl,
-        csvFiles: data?.csvFiles,
-      },
-    });
+    const stream = LlamaIndexStream(response, vercelStreamData);
 
     // Return a StreamingTextResponse, which can be consumed by the Vercel/AI client
     return new StreamingTextResponse(stream, {}, vercelStreamData);
diff --git a/templates/types/streaming/nextjs/app/components/ui/chat/chat-input.tsx b/templates/types/streaming/nextjs/app/components/ui/chat/chat-input.tsx
index 531361beffa0ef2d1f3f4f5ef97ba37970b06b20..8ee4ec3740d0ec089abf9fd592da3449542ee30e 100644
--- a/templates/types/streaming/nextjs/app/components/ui/chat/chat-input.tsx
+++ b/templates/types/streaming/nextjs/app/components/ui/chat/chat-input.tsx
@@ -1,5 +1,4 @@
 import { JSONValue } from "ai";
-import { Loader2 } from "lucide-react";
 import { useState } from "react";
 import { v4 as uuidv4 } from "uuid";
 import { MessageAnnotation, MessageAnnotationType } from ".";
@@ -26,12 +25,10 @@ export default function ChatInput(
   >,
 ) {
   const [imageUrl, setImageUrl] = useState<string | null>(null);
-  const { files, uploadNew, removeFile, resetUploadedFiles } = useCsv(
-    props.messages,
-  );
+  const { files: csvFiles, upload, remove, reset } = useCsv();
 
-  const getAttachments = () => {
-    if (!imageUrl && files.length === 0) return undefined;
+  const getAnnotations = () => {
+    if (!imageUrl && csvFiles.length === 0) return undefined;
     const annotations: MessageAnnotation[] = [];
     if (imageUrl) {
       annotations.push({
@@ -39,16 +36,15 @@ export default function ChatInput(
         data: { url: imageUrl },
       });
     }
-    if (files.length > 0) {
+    if (csvFiles.length > 0) {
       annotations.push({
         type: MessageAnnotationType.CSV,
         data: {
-          csvFiles: files.map((file) => ({
+          csvFiles: csvFiles.map((file) => ({
             id: file.id,
             content: file.content,
             filename: file.filename,
             filesize: file.filesize,
-            type: "available",
           })),
         },
       });
@@ -58,31 +54,26 @@ export default function ChatInput(
 
   // default submit function does not handle including annotations in the message
   // so we need to use append function to submit new message with annotations
-  const submitWithAttachment = (
+  const handleSubmitWithAnnotations = (
     e: React.FormEvent<HTMLFormElement>,
-    attachments: JSONValue[] | undefined,
+    annotations: JSONValue[] | undefined,
   ) => {
     e.preventDefault();
-    props.append!(
-      {
-        content: props.input,
-        role: "user",
-        createdAt: new Date(),
-        annotations: attachments,
-      },
-      {
-        data: { imageUrl, csvFiles: files },
-      },
-    );
-    setImageUrl(null);
-    resetUploadedFiles();
+    props.append!({
+      content: props.input,
+      role: "user",
+      createdAt: new Date(),
+      annotations,
+    });
     props.setInput!("");
   };
 
   const onSubmit = (e: React.FormEvent<HTMLFormElement>) => {
-    const attachments = getAttachments();
-    if (attachments) {
-      submitWithAttachment(e, attachments);
+    const annotations = getAnnotations();
+    if (annotations) {
+      handleSubmitWithAnnotations(e, annotations);
+      imageUrl && setImageUrl(null);
+      csvFiles.length && reset();
       return;
     }
     props.handleSubmit(e);
@@ -107,7 +98,7 @@ export default function ChatInput(
       reader.onload = () => resolve(reader.result as string);
       reader.onerror = (error) => reject(error);
     });
-    const isSuccess = uploadNew({
+    const isSuccess = upload({
       id: uuidv4(),
       content,
       filename: file.name,
@@ -124,7 +115,7 @@ export default function ChatInput(
         return await handleUploadImageFile(file);
       }
       if (file.type === "text/csv") {
-        if (files.length > 0) {
+        if (csvFiles.length > 0) {
           alert("You can only upload one csv file at a time.");
           return;
         }
@@ -144,27 +135,17 @@ export default function ChatInput(
       {imageUrl && (
         <UploadImagePreview url={imageUrl} onRemove={onRemovePreviewImage} />
       )}
-      {files.length > 0 && (
+      {csvFiles.length > 0 && (
         <div className="flex gap-4 w-full overflow-auto py-2">
-          {props.isLoading ? (
-            <div className="flex gap-2 items-center">
-              <Loader2 className="h-4 w-4 animate-spin" />{" "}
-              <span>Handling csv files...</span>
-            </div>
-          ) : (
-            <>
-              {files.map((csv) => {
-                return (
-                  <UploadCsvPreview
-                    key={csv.id}
-                    csv={csv}
-                    onRemove={() => removeFile(csv)}
-                    isNew={csv.type === "new_upload"}
-                  />
-                );
-              })}
-            </>
-          )}
+          {csvFiles.map((csv) => {
+            return (
+              <UploadCsvPreview
+                key={csv.id}
+                csv={csv}
+                onRemove={() => remove(csv)}
+              />
+            );
+          })}
         </div>
       )}
       <div className="flex w-full items-start justify-between gap-4 ">
diff --git a/templates/types/streaming/nextjs/app/components/ui/chat/use-csv.ts b/templates/types/streaming/nextjs/app/components/ui/chat/use-csv.ts
index 7c51f444574d632eadf855359a3402bd612a699b..afdbacaa3458405938924db8bc76d56670c7953d 100644
--- a/templates/types/streaming/nextjs/app/components/ui/chat/use-csv.ts
+++ b/templates/types/streaming/nextjs/app/components/ui/chat/use-csv.ts
@@ -1,31 +1,10 @@
 "use client";
 
-import { Message } from "ai";
-import { useEffect, useMemo, useState } from "react";
-import {
-  CsvData,
-  CsvFile,
-  MessageAnnotation,
-  MessageAnnotationType,
-  getAnnotationData,
-} from ".";
+import { useState } from "react";
+import { CsvFile } from ".";
 
-interface FrontendCSVData extends CsvFile {
-  type: "available" | "new_upload";
-}
-
-export function useCsv(messages: Message[]) {
-  const [availableFiles, setAvailableFiles] = useState<FrontendCSVData[]>([]);
-  const [uploadedFiles, setUploadedFiles] = useState<FrontendCSVData[]>([]);
-
-  const files = useMemo(() => {
-    return [...availableFiles, ...uploadedFiles];
-  }, [availableFiles, uploadedFiles]);
-
-  useEffect(() => {
-    const items = getAvailableCsvFiles(messages);
-    setAvailableFiles(items.map((data) => ({ ...data, type: "available" })));
-  }, [messages]);
+export function useCsv() {
+  const [files, setFiles] = useState<CsvFile[]>([]);
 
   const csvEqual = (a: CsvFile, b: CsvFile) => {
     if (a.id === b.id) return true;
@@ -33,53 +12,22 @@ export function useCsv(messages: Message[]) {
     return false;
   };
 
-  // Get available csv files from annotations chat history
-  // returns the unique csv files by id
-  const getAvailableCsvFiles = (messages: Message[]): Array<CsvFile> => {
-    const docHash: Record<string, CsvFile> = {};
-    messages.forEach((message) => {
-      if (message.annotations) {
-        const csvData = getAnnotationData<CsvData>(
-          message.annotations as MessageAnnotation[],
-          MessageAnnotationType.CSV,
-        );
-        csvData.forEach((data) => {
-          data.csvFiles.forEach((file) => {
-            if (!docHash[file.id]) {
-              docHash[file.id] = file;
-            }
-          });
-        });
-      }
-    });
-    return Object.values(docHash);
-  };
-
-  const uploadNew = (file: CsvFile) => {
+  const upload = (file: CsvFile) => {
     const existedCsv = files.find((f) => csvEqual(f, file));
     if (!existedCsv) {
-      setUploadedFiles((prev) => [...prev, { ...file, type: "new_upload" }]);
+      setFiles((prev) => [...prev, file]);
       return true;
     }
     return false;
   };
 
-  const removeFile = (file: FrontendCSVData) => {
-    if (file.type === "new_upload") {
-      setUploadedFiles((prev) => prev.filter((f) => f.id !== file.id));
-    } else {
-      setAvailableFiles((prev) => prev.filter((f) => f.id !== file.id));
-    }
+  const remove = (file: CsvFile) => {
+    setFiles((prev) => prev.filter((f) => f.id !== file.id));
   };
 
-  const resetUploadedFiles = () => {
-    setUploadedFiles([]);
+  const reset = () => {
+    setFiles([]);
   };
 
-  return {
-    files,
-    uploadNew,
-    removeFile,
-    resetUploadedFiles,
-  };
+  return { files, upload, remove, reset };
 }
diff --git a/templates/types/streaming/nextjs/app/components/ui/upload-csv-preview.tsx b/templates/types/streaming/nextjs/app/components/ui/upload-csv-preview.tsx
index 767ed63c3a0f352345c5eda08021aa32aaabd70b..db540975ce2913a34e6b4aff2e7a6e1132e095ef 100644
--- a/templates/types/streaming/nextjs/app/components/ui/upload-csv-preview.tsx
+++ b/templates/types/streaming/nextjs/app/components/ui/upload-csv-preview.tsx
@@ -17,7 +17,6 @@ import { cn } from "./lib/utils";
 export interface UploadCsvPreviewProps {
   csv: CsvFile;
   onRemove?: () => void;
-  isNew?: boolean;
 }
 
 export default function UploadCsvPreview(props: UploadCsvPreviewProps) {
@@ -52,7 +51,7 @@ export default function UploadCsvPreview(props: UploadCsvPreviewProps) {
 }
 
 function CSVSummaryCard(props: UploadCsvPreviewProps) {
-  const { onRemove, isNew, csv } = props;
+  const { onRemove, csv } = props;
   return (
     <div className="p-2 w-60 max-w-60 bg-secondary rounded-lg text-sm relative cursor-pointer">
       <div className="flex flex-row items-center gap-2">
@@ -70,11 +69,6 @@ function CSVSummaryCard(props: UploadCsvPreviewProps) {
           </div>
           <div className="truncate text-token-text-tertiary flex items-center gap-2">
             <span>Spreadsheet</span>
-            {isNew && (
-              <span className="px-2 py-0.5 bg-red-400 text-white text-xs rounded-2xl">
-                new
-              </span>
-            )}
           </div>
         </div>
       </div>