diff --git a/apps/next/next.config.mjs b/apps/next/next.config.mjs
index 3248e75aec380065984782f194c53f0a992f6a42..4634aec95adcaae2e7fe9cd59bd119d413bef450 100644
--- a/apps/next/next.config.mjs
+++ b/apps/next/next.config.mjs
@@ -6,6 +6,8 @@ const withMDX = createMDX();
 const config = {
   reactStrictMode: true,
   transpilePackages: ["monaco-editor"],
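+  // Opt @huggingface/transformers out of server bundling; Next.js loads it
+  // with a native require() at runtime instead.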
+  serverExternalPackages: ["@huggingface/transformers"],
   webpack: (config, { isServer }) => {
     if (Array.isArray(config.target) && config.target.includes("web")) {
       config.target = ["web", "es2020"];
@@ -26,6 +28,9 @@ const config = {
         }),
       );
     }
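+    // Alias `replicate` to false so webpack resolves it to an empty module
+    // instead of bundling it.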
+    config.resolve.alias["replicate"] = false;
     return config;
   },
 };
diff --git a/apps/next/src/app/api/chat/route.ts b/apps/next/src/app/api/chat/route.ts
index 5852ff29568abc5b6e5585b0412e91a54993ba69..49cbd9c011661bb9763f81726dd539ea0cac9a9c 100644
--- a/apps/next/src/app/api/chat/route.ts
+++ b/apps/next/src/app/api/chat/route.ts
@@ -1,6 +1,10 @@
-import { Message } from "ai";
-import { simulateReadableStream } from "ai/test";
-import { NextRequest, NextResponse } from "next/server";
+import { llm } from "@/lib/utils";
+import { LlamaIndexAdapter, type Message } from "ai";
+import { Settings, SimpleChatEngine, type ChatMessage } from "llamaindex";
+import { NextResponse, type NextRequest } from "next/server";
+
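+// Use the mock LLM from lib/utils as the global default for LlamaIndex.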
+Settings.llm = llm;
 
 export async function POST(request: NextRequest) {
   try {
@@ -12,19 +16,18 @@ export async function POST(request: NextRequest) {
         { status: 400 },
       );
     }
-    const mockResponse = `Hello! This is a mock response to: ${userMessage.content}`;
-    return new Response(
-      simulateReadableStream({
-        chunkDelayInMs: 20,
-        values: mockResponse.split(" ").map((t) => `0:"${t} "\n`),
-      }).pipeThrough(new TextEncoderStream()),
-      {
-        status: 200,
-        headers: {
-          "X-Vercel-AI-Data-Stream": "v1",
-          "Content-Type": "text/plain; charset=utf-8",
-        },
-      },
+
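+    // SimpleChatEngine chats directly with Settings.llm; no index or retrieval is involved.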
+    const chatEngine = new SimpleChatEngine();
+
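+    // toDataStreamResponse adapts the LlamaIndex chat stream to the Vercel AI SDK's data stream protocol.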
+    return LlamaIndexAdapter.toDataStreamResponse(
+      await chatEngine.chat({
+        message: userMessage.content,
+        chatHistory: messages as ChatMessage[],
+        stream: true,
+      }),
+      {},
     );
   } catch (error) {
     const detail = (error as Error).message;
diff --git a/apps/next/src/components/demo/chat/rsc/ai-action.tsx b/apps/next/src/components/demo/chat/rsc/ai-action.tsx
index f8d944ad13e86b33d527b00bca6e5d1a362cc4a7..dd74f5e0da5e45b1524719a793d39371169a5856 100644
--- a/apps/next/src/components/demo/chat/rsc/ai-action.tsx
+++ b/apps/next/src/components/demo/chat/rsc/ai-action.tsx
@@ -1,7 +1,8 @@
+import { llm } from "@/lib/utils";
 import { Markdown } from "@llamaindex/chat-ui/widgets";
-import { generateId, Message, parseStreamPart } from "ai";
+import { generateId, Message } from "ai";
 import { createAI, createStreamableUI, getMutableAIState } from "ai/rsc";
-import { simulateReadableStream } from "ai/test";
+import { type ChatMessage, Settings, SimpleChatEngine } from "llamaindex";
 import { ReactNode } from "react";
 
 type ServerState = Message[];
@@ -10,6 +11,9 @@ type Actions = {
   chat: (message: Message) => Promise<Message & { display: ReactNode }>;
 };
 
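+// The RSC demo reuses the same mock LLM through LlamaIndex's global Settings.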
+Settings.llm = llm;
+
 export const AI = createAI<ServerState, FrontendState, Actions>({
   initialAIState: [],
   initialUIState: [],
@@ -20,31 +24,34 @@ export const AI = createAI<ServerState, FrontendState, Actions>({
       const aiState = getMutableAIState<typeof AI>();
       aiState.update((prev) => [...prev, message]);
 
-      const mockResponse = `Hello! This is a mock response to: ${message.content}`;
-      const responseStream = simulateReadableStream({
-        chunkDelayInMs: 20,
-        values: mockResponse.split(" ").map((t) => `0:"${t} "\n`),
-      });
-
       const uiStream = createStreamableUI();
+      const chatEngine = new SimpleChatEngine();
       const assistantMessage: Message = {
         id: generateId(),
         role: "assistant",
         content: "",
       };
 
-      responseStream.pipeTo(
-        new WritableStream({
-          write: async (message) => {
-            assistantMessage.content += parseStreamPart(message).value;
-            uiStream.update(<Markdown content={assistantMessage.content} />);
-          },
-          close: () => {
-            aiState.done([...aiState.get(), assistantMessage]);
-            uiStream.done();
-          },
-        }),
-      );
+      // Stream the response in the background so the UI node is returned immediately.
+      (async () => {
+        const chatResponse = await chatEngine.chat({
+          stream: true,
+          message: message.content,
+          chatHistory: aiState.get() as ChatMessage[],
+        });
+
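+        // Each chunk carries only the newly generated text in `delta`.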
+        for await (const chunk of chatResponse) {
+          assistantMessage.content += chunk.delta;
+          uiStream.update(<Markdown content={assistantMessage.content} />);
+        }
+
+        aiState.done([...aiState.get(), assistantMessage]);
+        uiStream.done();
+      })().catch((error) => {
+        // Close the UI stream on failure; otherwise the client would hang.
+        uiStream.error(error);
+      });
 
       return {
         ...assistantMessage,
diff --git a/apps/next/src/lib/utils.ts b/apps/next/src/lib/utils.ts
index bd0c391ddd1088e9067844c48835bf4abcd61783..e073bc94585c2568bb4cad80741ceaec30df9a11 100644
--- a/apps/next/src/lib/utils.ts
+++ b/apps/next/src/lib/utils.ts
@@ -1,6 +1,39 @@
-import { clsx, type ClassValue } from "clsx"
-import { twMerge } from "tailwind-merge"
+import { clsx, type ClassValue } from "clsx";
+import { LLM, LLMMetadata } from "llamaindex";
+import { twMerge } from "tailwind-merge";
 
 export function cn(...inputs: ClassValue[]) {
   return twMerge(clsx(inputs))
 }
+
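+// Minimal mock of a LlamaIndex LLM: it implements only the streaming chat()
+// path the demo chat engines call, so no model or API key is needed.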
+class MockLLM {
+  metadata: LLMMetadata = {
+    model: "MockLLM",
+    temperature: 0.5,
+    topP: 0.5,
+    contextWindow: 1024,
+    tokenizer: undefined,
+  };
+
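+  // Ignores its arguments and streams a canned reply, one character every 20 ms.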
+  chat() {
+    const mockResponse = "Hello! This is a mock response";
+    return Promise.resolve(
+      new ReadableStream({
+        async start(controller) {
+          for (const char of mockResponse) {
+            controller.enqueue({ delta: char });
+            await new Promise((resolve) => setTimeout(resolve, 20));
+          }
+          controller.close();
+        },
+      }),
+    );
+  }
+}
+
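+// Only the members the demos actually use are implemented, so the double
+// cast is needed to satisfy the LLM type.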
+export const llm = new MockLLM() as unknown as LLM;