From cfb5257a1ee0f34072c282f7552f45f7dff453a2 Mon Sep 17 00:00:00 2001
From: Thuc Pham <51660321+thucpn@users.noreply.github.com>
Date: Wed, 24 Apr 2024 14:20:58 +0700
Subject: [PATCH] feat: display chat events (#52)

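Stream intermediate events (e.g. retrieving nodes) from the Express, Next.js
and FastAPI backends as "events" message annotations, and render them per
chat message in a collapsible panel in the Next.js UI.
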
---------

Co-authored-by: Marcus Schiesser <mail@marcusschiesser.de>
Co-authored-by: leehuwuj <leehuwuj@gmail.com>
---
 .changeset/spicy-moose-allow.md               |  5 +
 .../src/controllers/chat.controller.ts        | 29 ++++--
 .../src/controllers/llamaindex-stream.ts      | 30 +-----
 .../express/src/controllers/stream-helper.ts  | 39 ++++++++
 .../streaming/fastapi/app/api/routers/chat.py | 37 ++++++--
 .../fastapi/app/api/routers/messaging.py      | 92 +++++++++++++++++++
 .../types/streaming/fastapi/pyproject.toml    |  1 +
 .../nextjs/app/api/chat/llamaindex-stream.ts  | 30 +-----
 .../streaming/nextjs/app/api/chat/route.ts    | 25 ++++-
 .../nextjs/app/api/chat/stream-helper.ts      | 39 ++++++++
 .../app/components/ui/chat/chat-events.tsx    | 60 ++++++++++++
 .../app/components/ui/chat/chat-message.tsx   | 43 +++++++--
 .../app/components/ui/chat/chat-messages.tsx  |  2 +-
 .../nextjs/app/components/ui/chat/index.ts    |  8 +-
 .../nextjs/app/components/ui/collapsible.tsx  | 11 +++
 templates/types/streaming/nextjs/package.json |  1 +
 16 files changed, 367 insertions(+), 85 deletions(-)
 create mode 100644 .changeset/spicy-moose-allow.md
 create mode 100644 templates/types/streaming/express/src/controllers/stream-helper.ts
 create mode 100644 templates/types/streaming/fastapi/app/api/routers/messaging.py
 create mode 100644 templates/types/streaming/nextjs/app/api/chat/stream-helper.ts
 create mode 100644 templates/types/streaming/nextjs/app/components/ui/chat/chat-events.tsx
 create mode 100644 templates/types/streaming/nextjs/app/components/ui/collapsible.tsx

diff --git a/.changeset/spicy-moose-allow.md b/.changeset/spicy-moose-allow.md
new file mode 100644
index 00000000..5c784e72
--- /dev/null
+++ b/.changeset/spicy-moose-allow.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Display events (e.g. retrieving nodes) per chat message
diff --git a/templates/types/streaming/express/src/controllers/chat.controller.ts b/templates/types/streaming/express/src/controllers/chat.controller.ts
index f200628a..bb77c6e5 100644
--- a/templates/types/streaming/express/src/controllers/chat.controller.ts
+++ b/templates/types/streaming/express/src/controllers/chat.controller.ts
@@ -1,8 +1,9 @@
-import { streamToResponse } from "ai";
+import { Message, StreamData, streamToResponse } from "ai";
 import { Request, Response } from "express";
-import { ChatMessage, MessageContent } from "llamaindex";
+import { ChatMessage, MessageContent, Settings } from "llamaindex";
 import { createChatEngine } from "./engine/chat";
 import { LlamaIndexStream } from "./llamaindex-stream";
+import { appendEventData } from "./stream-helper";
 
 const convertMessageContent = (
   textMessage: string,
@@ -25,7 +26,7 @@ const convertMessageContent = (
 
 export const chat = async (req: Request, res: Response) => {
   try {
-    const { messages, data }: { messages: ChatMessage[]; data: any } = req.body;
+    const { messages, data }: { messages: Message[]; data: any } = req.body;
     const userMessage = messages.pop();
     if (!messages || !userMessage || userMessage.role !== "user") {
       return res.status(400).json({
@@ -42,22 +43,38 @@ export const chat = async (req: Request, res: Response) => {
       data?.imageUrl,
     );
 
+    // Init Vercel AI StreamData
+    const vercelStreamData = new StreamData();
+    appendEventData(
+      vercelStreamData,
+      `Retrieving context for query: '${userMessage.content}'`,
+    );
+
+    // Set up a callback for streaming data before chatting
+    Settings.callbackManager.on("retrieve", (data) => {
+      const { nodes } = data.detail;
+      appendEventData(
+        vercelStreamData,
+        `Retrieved ${nodes.length} sources to use as context for the query`,
+      );
+    });
+
     // Calling LlamaIndex's ChatEngine to get a streamed response
     const response = await chatEngine.chat({
       message: userMessageContent,
-      chatHistory: messages,
+      chatHistory: messages as ChatMessage[],
       stream: true,
     });
 
     // Return a stream, which can be consumed by the Vercel/AI client
-    const { stream, data: streamData } = LlamaIndexStream(response, {
+    const { stream } = LlamaIndexStream(response, vercelStreamData, {
       parserOptions: {
         image_url: data?.imageUrl,
       },
     });
 
     // Pipe LlamaIndexStream to response
-    const processedStream = stream.pipeThrough(streamData.stream);
+    const processedStream = stream.pipeThrough(vercelStreamData.stream);
     return streamToResponse(processedStream, res, {
       headers: {
         // response MUST have the `X-Experimental-Stream-Data: 'true'` header
diff --git a/templates/types/streaming/express/src/controllers/llamaindex-stream.ts b/templates/types/streaming/express/src/controllers/llamaindex-stream.ts
index 866acc8a..b6899057 100644
--- a/templates/types/streaming/express/src/controllers/llamaindex-stream.ts
+++ b/templates/types/streaming/express/src/controllers/llamaindex-stream.ts
@@ -11,38 +11,12 @@ import {
   Response,
   StreamingAgentChatResponse,
 } from "llamaindex";
+import { appendImageData, appendSourceData } from "./stream-helper";
 
 type ParserOptions = {
   image_url?: string;
 };
 
-function appendImageData(data: StreamData, imageUrl?: string) {
-  if (!imageUrl) return;
-  data.appendMessageAnnotation({
-    type: "image",
-    data: {
-      url: imageUrl,
-    },
-  });
-}
-
-function appendSourceData(
-  data: StreamData,
-  sourceNodes?: NodeWithScore<Metadata>[],
-) {
-  if (!sourceNodes?.length) return;
-  data.appendMessageAnnotation({
-    type: "sources",
-    data: {
-      nodes: sourceNodes.map((node) => ({
-        ...node.node.toMutableJSON(),
-        id: node.node.id_,
-        score: node.score ?? null,
-      })),
-    },
-  });
-}
-
 function createParser(
   res: AsyncIterable<Response>,
   data: StreamData,
@@ -79,12 +53,12 @@ function createParser(
 
 export function LlamaIndexStream(
   response: StreamingAgentChatResponse | AsyncIterable<Response>,
+  data: StreamData,
   opts?: {
     callbacks?: AIStreamCallbacksAndOptions;
     parserOptions?: ParserOptions;
   },
 ): { stream: ReadableStream; data: StreamData } {
-  const data = new StreamData();
   const res =
     response instanceof StreamingAgentChatResponse
       ? response.response
diff --git a/templates/types/streaming/express/src/controllers/stream-helper.ts b/templates/types/streaming/express/src/controllers/stream-helper.ts
new file mode 100644
index 00000000..e74597b9
--- /dev/null
+++ b/templates/types/streaming/express/src/controllers/stream-helper.ts
@@ -0,0 +1,39 @@
+import { StreamData } from "ai";
+import { Metadata, NodeWithScore } from "llamaindex";
+
+export function appendImageData(data: StreamData, imageUrl?: string) {
+  if (!imageUrl) return;
+  data.appendMessageAnnotation({
+    type: "image",
+    data: {
+      url: imageUrl,
+    },
+  });
+}
+
+export function appendSourceData(
+  data: StreamData,
+  sourceNodes?: NodeWithScore<Metadata>[],
+) {
+  if (!sourceNodes?.length) return;
+  data.appendMessageAnnotation({
+    type: "sources",
+    data: {
+      nodes: sourceNodes.map((node) => ({
+        ...node.node.toMutableJSON(),
+        id: node.node.id_,
+        score: node.score ?? null,
+      })),
+    },
+  });
+}
+
+export function appendEventData(data: StreamData, title?: string) {
+  if (!title) return;
+  data.appendMessageAnnotation({
+    type: "events",
+    data: {
+      title,
+    },
+  });
+}
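
For reference, a minimal usage sketch of the helpers above -- the query string
and image URL are invented examples; the annotation shapes are taken from the
helper bodies:

  import { StreamData } from "ai";
  import {
    appendEventData,
    appendImageData,
    appendSourceData,
  } from "./stream-helper";

  const data = new StreamData();
  // appends { type: "events", data: { title: "Retrieving context for query: 'hello'" } }
  appendEventData(data, "Retrieving context for query: 'hello'");
  // appends { type: "image", data: { url: "https://example.com/cat.png" } }
  appendImageData(data, "https://example.com/cat.png");
  // no-op: appendSourceData returns early when there are no source nodes
  appendSourceData(data, []);
  // flush and close the stream data once the response is finished
  await data.close();
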
diff --git a/templates/types/streaming/fastapi/app/api/routers/chat.py b/templates/types/streaming/fastapi/app/api/routers/chat.py
index 12810f5a..cd287f5f 100644
--- a/templates/types/streaming/fastapi/app/api/routers/chat.py
+++ b/templates/types/streaming/fastapi/app/api/routers/chat.py
@@ -9,6 +9,8 @@ from llama_index.core.schema import NodeWithScore
 from llama_index.core.llms import ChatMessage, MessageRole
 from app.engine import get_chat_engine
 from app.api.routers.vercel_response import VercelStreamResponse
+from app.api.routers.messaging import EventCallbackHandler
+from aiostream import stream
 
 chat_router = r = APIRouter()
 
@@ -46,7 +48,7 @@ class _SourceNodes(BaseModel):
             id=source_node.node.node_id,
             metadata=source_node.node.metadata,
             score=source_node.score,
-            text=source_node.node.text,
+            text=source_node.node.text,  # type: ignore
         )
 
     @classmethod
@@ -92,15 +94,34 @@ async def chat(
 ):
     last_message_content, messages = await parse_chat_data(data)
 
+    event_handler = EventCallbackHandler()
+    chat_engine.callback_manager.handlers.append(event_handler)  # type: ignore
     response = await chat_engine.astream_chat(last_message_content, messages)
 
-    async def event_generator(request: Request, response: StreamingAgentChatResponse):
+    async def content_generator():
         # Yield the text response
-        async for token in response.async_response_gen():
-            # If client closes connection, stop sending events
-            if await request.is_disconnected():
-                break
-            yield VercelStreamResponse.convert_text(token)
+        async def _text_generator():
+            async for token in response.async_response_gen():
+                yield VercelStreamResponse.convert_text(token)
+            # The text generator is the leading stream; once it finishes, also finish the event stream
+            event_handler.is_done = True
+
+        # Yield the events from the event handler
+        async def _event_generator():
+            async for event in event_handler.async_event_gen():
+                title = event.get_title()
+                if title is None:
+                    continue  # skip unhandled event types (no title)
+                yield VercelStreamResponse.convert_data(
+                    {"type": "events", "data": {"title": title}}
+                )
+
+        combine = stream.merge(_text_generator(), _event_generator())
+        async with combine.stream() as streamer:
+            async for item in streamer:
+                if await request.is_disconnected():
+                    break
+                yield item
 
         # Yield the source nodes
         yield VercelStreamResponse.convert_data(
@@ -115,7 +136,7 @@ async def chat(
             }
         )
 
-    return VercelStreamResponse(content=event_generator(request, response))
+    return VercelStreamResponse(content=content_generator())
 
 
 # non-streaming endpoint - delete if not needed
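
The FastAPI route above emits the same { "type": "events", "data": { "title": ... } }
annotation shape via VercelStreamResponse.convert_data as the appendEventData helper
used by the Express and Next.js templates, so the ChatEvents component further below
can render events from any of the three backends.
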
diff --git a/templates/types/streaming/fastapi/app/api/routers/messaging.py b/templates/types/streaming/fastapi/app/api/routers/messaging.py
new file mode 100644
index 00000000..ae5cc193
--- /dev/null
+++ b/templates/types/streaming/fastapi/app/api/routers/messaging.py
@@ -0,0 +1,92 @@
+import asyncio
+from typing import AsyncGenerator, Dict, Any, List, Optional
+
+from llama_index.core.callbacks.base import BaseCallbackHandler
+from llama_index.core.callbacks.schema import CBEventType
+from pydantic import BaseModel
+
+
+class CallbackEvent(BaseModel):
+    event_type: CBEventType
+    payload: Optional[Dict[str, Any]] = None
+    event_id: str = ""
+
+    def get_title(self):
+        # Return None for unhandled event types
+        # so they are not shown in the UI
+        match self.event_type:
+            case "retrieve":
+                if self.payload:
+                    nodes = self.payload.get("nodes")
+                    if nodes:
+                        return f"Retrieved {len(nodes)} sources to use as context for the query"
+                    else:
+                        return f"Retrieving context for query: '{self.payload.get('query_str')}'"
+                else:
+                    return None
+            case _:
+                return None
+
+
+class EventCallbackHandler(BaseCallbackHandler):
+    _aqueue: asyncio.Queue
+    is_done: bool = False
+
+    def __init__(
+        self,
+    ):
+        """Initialize the event callback handler."""
+        ignored_events = [
+            CBEventType.CHUNKING,
+            CBEventType.NODE_PARSING,
+            CBEventType.EMBEDDING,
+            CBEventType.LLM,
+            CBEventType.TEMPLATING,
+        ]
+        super().__init__(ignored_events, ignored_events)
+        self._aqueue = asyncio.Queue()
+
+    def on_event_start(
+        self,
+        event_type: CBEventType,
+        payload: Optional[Dict[str, Any]] = None,
+        event_id: str = "",
+        **kwargs: Any,
+    ) -> str:
+        self._aqueue.put_nowait(
+            CallbackEvent(event_id=event_id, event_type=event_type, payload=payload)
+        )
+
+    def on_event_end(
+        self,
+        event_type: CBEventType,
+        payload: Optional[Dict[str, Any]] = None,
+        event_id: str = "",
+        **kwargs: Any,
+    ) -> None:
+        self._aqueue.put_nowait(
+            CallbackEvent(event_id=event_id, event_type=event_type, payload=payload)
+        )
+
+    def start_trace(self, trace_id: Optional[str] = None) -> None:
+        """No-op."""
+
+    def end_trace(
+        self,
+        trace_id: Optional[str] = None,
+        trace_map: Optional[Dict[str, List[str]]] = None,
+    ) -> None:
+        """No-op."""
+
+    async def async_event_gen(self) -> AsyncGenerator[CallbackEvent, None]:
+        while True:
+            if not self._aqueue.empty() or not self.is_done:
+                try:
+                    event = await asyncio.wait_for(self._aqueue.get(), timeout=0.1)
+                except asyncio.TimeoutError:
+                    if self.is_done:
+                        break
+                    continue
+                yield event
+            else:
+                break
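
The 0.1 second timeout in async_event_gen lets the generator notice is_done (set by
the text generator in chat.py above) even while the queue is empty, so the merged
stream terminates promptly once the text response finishes; events already queued are
still drained before the generator exits.
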
diff --git a/templates/types/streaming/fastapi/pyproject.toml b/templates/types/streaming/fastapi/pyproject.toml
index 93176212..286b7228 100644
--- a/templates/types/streaming/fastapi/pyproject.toml
+++ b/templates/types/streaming/fastapi/pyproject.toml
@@ -13,6 +13,7 @@ python = "^3.11,<3.12"
 fastapi = "^0.109.1"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
 python-dotenv = "^1.0.0"
+aiostream = "^0.5.2"
 llama-index = "0.10.28"
 llama-index-core = "0.10.28"
 
diff --git a/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts b/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts
index 866acc8a..b6899057 100644
--- a/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts
+++ b/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts
@@ -11,38 +11,12 @@ import {
   Response,
   StreamingAgentChatResponse,
 } from "llamaindex";
+import { appendImageData, appendSourceData } from "./stream-helper";
 
 type ParserOptions = {
   image_url?: string;
 };
 
-function appendImageData(data: StreamData, imageUrl?: string) {
-  if (!imageUrl) return;
-  data.appendMessageAnnotation({
-    type: "image",
-    data: {
-      url: imageUrl,
-    },
-  });
-}
-
-function appendSourceData(
-  data: StreamData,
-  sourceNodes?: NodeWithScore<Metadata>[],
-) {
-  if (!sourceNodes?.length) return;
-  data.appendMessageAnnotation({
-    type: "sources",
-    data: {
-      nodes: sourceNodes.map((node) => ({
-        ...node.node.toMutableJSON(),
-        id: node.node.id_,
-        score: node.score ?? null,
-      })),
-    },
-  });
-}
-
 function createParser(
   res: AsyncIterable<Response>,
   data: StreamData,
@@ -79,12 +53,12 @@ function createParser(
 
 export function LlamaIndexStream(
   response: StreamingAgentChatResponse | AsyncIterable<Response>,
+  data: StreamData,
   opts?: {
     callbacks?: AIStreamCallbacksAndOptions;
     parserOptions?: ParserOptions;
   },
 ): { stream: ReadableStream; data: StreamData } {
-  const data = new StreamData();
   const res =
     response instanceof StreamingAgentChatResponse
       ? response.response
diff --git a/templates/types/streaming/nextjs/app/api/chat/route.ts b/templates/types/streaming/nextjs/app/api/chat/route.ts
index f836cbed..f7afa083 100644
--- a/templates/types/streaming/nextjs/app/api/chat/route.ts
+++ b/templates/types/streaming/nextjs/app/api/chat/route.ts
@@ -1,10 +1,11 @@
 import { initObservability } from "@/app/observability";
-import { Message, StreamingTextResponse } from "ai";
-import { ChatMessage, MessageContent } from "llamaindex";
+import { Message, StreamData, StreamingTextResponse } from "ai";
+import { ChatMessage, MessageContent, Settings } from "llamaindex";
 import { NextRequest, NextResponse } from "next/server";
 import { createChatEngine } from "./engine/chat";
 import { initSettings } from "./engine/settings";
 import { LlamaIndexStream } from "./llamaindex-stream";
+import { appendEventData } from "./stream-helper";
 
 initObservability();
 initSettings();
@@ -54,6 +55,22 @@ export async function POST(request: NextRequest) {
       data?.imageUrl,
     );
 
+    // Init Vercel AI StreamData
+    const vercelStreamData = new StreamData();
+    appendEventData(
+      vercelStreamData,
+      `Retrieving context for query: '${userMessage.content}'`,
+    );
+
+    // Set up a callback for streaming data before chatting
+    Settings.callbackManager.on("retrieve", (data) => {
+      const { nodes } = data.detail;
+      appendEventData(
+        vercelStreamData,
+        `Retrieved ${nodes.length} sources to use as context for the query`,
+      );
+    });
+
     // Calling LlamaIndex's ChatEngine to get a streamed response
     const response = await chatEngine.chat({
       message: userMessageContent,
@@ -62,14 +79,14 @@ export async function POST(request: NextRequest) {
     });
 
     // Transform LlamaIndex stream to Vercel/AI format
-    const { stream, data: streamData } = LlamaIndexStream(response, {
+    const { stream } = LlamaIndexStream(response, vercelStreamData, {
       parserOptions: {
         image_url: data?.imageUrl,
       },
     });
 
     // Return a StreamingTextResponse, which can be consumed by the Vercel/AI client
-    return new StreamingTextResponse(stream, {}, streamData);
+    return new StreamingTextResponse(stream, {}, vercelStreamData);
   } catch (error) {
     console.error("[LlamaIndex]", error);
     return NextResponse.json(
diff --git a/templates/types/streaming/nextjs/app/api/chat/stream-helper.ts b/templates/types/streaming/nextjs/app/api/chat/stream-helper.ts
new file mode 100644
index 00000000..e74597b9
--- /dev/null
+++ b/templates/types/streaming/nextjs/app/api/chat/stream-helper.ts
@@ -0,0 +1,39 @@
+import { StreamData } from "ai";
+import { Metadata, NodeWithScore } from "llamaindex";
+
+export function appendImageData(data: StreamData, imageUrl?: string) {
+  if (!imageUrl) return;
+  data.appendMessageAnnotation({
+    type: "image",
+    data: {
+      url: imageUrl,
+    },
+  });
+}
+
+export function appendSourceData(
+  data: StreamData,
+  sourceNodes?: NodeWithScore<Metadata>[],
+) {
+  if (!sourceNodes?.length) return;
+  data.appendMessageAnnotation({
+    type: "sources",
+    data: {
+      nodes: sourceNodes.map((node) => ({
+        ...node.node.toMutableJSON(),
+        id: node.node.id_,
+        score: node.score ?? null,
+      })),
+    },
+  });
+}
+
+export function appendEventData(data: StreamData, title?: string) {
+  if (!title) return;
+  data.appendMessageAnnotation({
+    type: "events",
+    data: {
+      title,
+    },
+  });
+}
diff --git a/templates/types/streaming/nextjs/app/components/ui/chat/chat-events.tsx b/templates/types/streaming/nextjs/app/components/ui/chat/chat-events.tsx
new file mode 100644
index 00000000..96d36fef
--- /dev/null
+++ b/templates/types/streaming/nextjs/app/components/ui/chat/chat-events.tsx
@@ -0,0 +1,60 @@
+import { ChevronDown, ChevronRight, Loader2 } from "lucide-react";
+import { useEffect, useState } from "react";
+import { Button } from "../button";
+import {
+  Collapsible,
+  CollapsibleContent,
+  CollapsibleTrigger,
+} from "../collapsible";
+import { EventData } from "./index";
+
+export function ChatEvents({
+  data,
+  isLoading,
+}: {
+  data: EventData[];
+  isLoading: boolean;
+}) {
+  const [isOpen, setIsOpen] = useState(true);
+
+  useEffect(() => {
+    // Collapse the events when finished streaming
+    if (!isLoading) {
+      setIsOpen(false);
+    }
+  }, [isLoading]);
+
+  const buttonLabel = isLoading
+    ? "In progress"
+    : isOpen
+      ? "Hide events"
+      : "Show events";
+
+  const EventIcon = isLoading ? (
+    <Loader2 className="h-4 w-4 animate-spin" />
+  ) : isOpen ? (
+    <ChevronDown className="h-4 w-4" />
+  ) : (
+    <ChevronRight className="h-4 w-4" />
+  );
+
+  return (
+    <div className="border-l-2 border-indigo-400 pl-2">
+      <Collapsible open={isOpen} onOpenChange={setIsOpen}>
+        <CollapsibleTrigger asChild>
+          <Button variant="secondary" className="space-x-2">
+            <span>{buttonLabel}</span>
+            {EventIcon}
+          </Button>
+        </CollapsibleTrigger>
+        <CollapsibleContent asChild>
+          <div className="mt-4 text-sm space-y-2">
+            {data.map((eventItem, index) => (
+              <div key={index}>{eventItem.title}</div>
+            ))}
+          </div>
+        </CollapsibleContent>
+      </Collapsible>
+    </div>
+  );
+}
diff --git a/templates/types/streaming/nextjs/app/components/ui/chat/chat-message.tsx b/templates/types/streaming/nextjs/app/components/ui/chat/chat-message.tsx
index cc037f91..c8dcc13a 100644
--- a/templates/types/streaming/nextjs/app/components/ui/chat/chat-message.tsx
+++ b/templates/types/streaming/nextjs/app/components/ui/chat/chat-message.tsx
@@ -4,10 +4,12 @@ import { Message } from "ai";
 import { Fragment } from "react";
 import { Button } from "../button";
 import ChatAvatar from "./chat-avatar";
+import { ChatEvents } from "./chat-events";
 import { ChatImage } from "./chat-image";
 import { ChatSources } from "./chat-sources";
 import {
   AnnotationData,
+  EventData,
   ImageData,
   MessageAnnotation,
   MessageAnnotationType,
@@ -16,7 +18,7 @@ import {
 import Markdown from "./markdown";
 import { useCopyToClipboard } from "./use-copy-to-clipboard";
 
-type ContentDiplayConfig = {
+type ContentDisplayConfig = {
   order: number;
   component: JSX.Element | null;
 };
@@ -24,11 +26,17 @@ type ContentDiplayConfig = {
 function getAnnotationData<T extends AnnotationData>(
   annotations: MessageAnnotation[],
   type: MessageAnnotationType,
-): T | undefined {
-  return annotations.find((a) => a.type === type)?.data as T | undefined;
+): T[] {
+  return annotations.filter((a) => a.type === type).map((a) => a.data as T);
 }
 
-function ChatMessageContent({ message }: { message: Message }) {
+function ChatMessageContent({
+  message,
+  isLoading,
+}: {
+  message: Message;
+  isLoading: boolean;
+}) {
   const annotations = message.annotations as MessageAnnotation[] | undefined;
   if (!annotations?.length) return <Markdown content={message.content} />;
 
@@ -36,15 +44,26 @@ function ChatMessageContent({ message }: { message: Message }) {
     annotations,
     MessageAnnotationType.IMAGE,
   );
+  const eventData = getAnnotationData<EventData>(
+    annotations,
+    MessageAnnotationType.EVENTS,
+  );
   const sourceData = getAnnotationData<SourceData>(
     annotations,
     MessageAnnotationType.SOURCES,
   );
 
-  const contents: ContentDiplayConfig[] = [
+  const contents: ContentDisplayConfig[] = [
+    {
+      order: -2,
+      component: imageData[0] ? <ChatImage data={imageData[0]} /> : null,
+    },
     {
       order: -1,
-      component: imageData ? <ChatImage data={imageData} /> : null,
+      component:
+        eventData.length > 0 ? (
+          <ChatEvents isLoading={isLoading} data={eventData} />
+        ) : null,
     },
     {
       order: 0,
@@ -52,7 +71,7 @@ function ChatMessageContent({ message }: { message: Message }) {
     },
     {
       order: 1,
-      component: sourceData ? <ChatSources data={sourceData} /> : null,
+      component: sourceData[0] ? <ChatSources data={sourceData[0]} /> : null,
     },
   ];
 
@@ -67,13 +86,19 @@ function ChatMessageContent({ message }: { message: Message }) {
   );
 }
 
-export default function ChatMessage(chatMessage: Message) {
+export default function ChatMessage({
+  chatMessage,
+  isLoading,
+}: {
+  chatMessage: Message;
+  isLoading: boolean;
+}) {
   const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 });
   return (
     <div className="flex items-start gap-4 pr-5 pt-5">
       <ChatAvatar role={chatMessage.role} />
       <div className="group flex flex-1 justify-between gap-2">
-        <ChatMessageContent message={chatMessage} />
+        <ChatMessageContent message={chatMessage} isLoading={isLoading} />
         <Button
           onClick={() => copyToClipboard(chatMessage.content)}
           size="icon"
diff --git a/templates/types/streaming/nextjs/app/components/ui/chat/chat-messages.tsx b/templates/types/streaming/nextjs/app/components/ui/chat/chat-messages.tsx
index abc3e52d..5f0eab6b 100644
--- a/templates/types/streaming/nextjs/app/components/ui/chat/chat-messages.tsx
+++ b/templates/types/streaming/nextjs/app/components/ui/chat/chat-messages.tsx
@@ -41,7 +41,7 @@ export default function ChatMessages(
         ref={scrollableChatContainerRef}
       >
         {props.messages.map((m) => (
-          <ChatMessage key={m.id} {...m} />
+          <ChatMessage key={m.id} chatMessage={m} isLoading={props.isLoading} />
         ))}
         {isPending && (
           <div className="flex justify-center items-center pt-10">
diff --git a/templates/types/streaming/nextjs/app/components/ui/chat/index.ts b/templates/types/streaming/nextjs/app/components/ui/chat/index.ts
index 87d8d306..464f195a 100644
--- a/templates/types/streaming/nextjs/app/components/ui/chat/index.ts
+++ b/templates/types/streaming/nextjs/app/components/ui/chat/index.ts
@@ -7,6 +7,7 @@ export { ChatInput, ChatMessages };
 export enum MessageAnnotationType {
   IMAGE = "image",
   SOURCES = "sources",
+  EVENTS = "events",
 }
 
 export type ImageData = {
@@ -24,7 +25,12 @@ export type SourceData = {
   nodes: SourceNode[];
 };
 
-export type AnnotationData = ImageData | SourceData;
+export type EventData = {
+  title: string;
+  isCollapsed: boolean;
+};
+
+export type AnnotationData = ImageData | SourceData | EventData;
 
 export type MessageAnnotation = {
   type: MessageAnnotationType;
diff --git a/templates/types/streaming/nextjs/app/components/ui/collapsible.tsx b/templates/types/streaming/nextjs/app/components/ui/collapsible.tsx
new file mode 100644
index 00000000..1fe76f5d
--- /dev/null
+++ b/templates/types/streaming/nextjs/app/components/ui/collapsible.tsx
@@ -0,0 +1,11 @@
+"use client";
+
+import * as CollapsiblePrimitive from "@radix-ui/react-collapsible";
+
+const Collapsible = CollapsiblePrimitive.Root;
+
+const CollapsibleTrigger = CollapsiblePrimitive.CollapsibleTrigger;
+
+const CollapsibleContent = CollapsiblePrimitive.CollapsibleContent;
+
+export { Collapsible, CollapsibleContent, CollapsibleTrigger };
diff --git a/templates/types/streaming/nextjs/package.json b/templates/types/streaming/nextjs/package.json
index 221c6998..707bd689 100644
--- a/templates/types/streaming/nextjs/package.json
+++ b/templates/types/streaming/nextjs/package.json
@@ -10,6 +10,7 @@
     "lint": "next lint"
   },
   "dependencies": {
+    "@radix-ui/react-collapsible": "^1.0.3",
     "@radix-ui/react-hover-card": "^1.0.7",
     "@radix-ui/react-slot": "^1.0.2",
     "ai": "^3.0.21",
-- 
GitLab