diff --git a/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts b/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
index 46dfa56842916908cc3b6df184546d791447f03e..9f9639b72724669e42ddd2ad25dcc2d31368c07c 100644
--- a/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
+++ b/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
@@ -2,7 +2,7 @@ import { Request, Response } from "express";
 import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
 import { createChatEngine } from "./engine";
 
-const getLastMessageContent = (
+const convertMessageContent = (
   textMessage: string,
   imageUrl: string | undefined,
 ): MessageContent => {
@@ -24,8 +24,8 @@ const getLastMessageContent = (
 export const chat = async (req: Request, res: Response) => {
   try {
     const { messages, data }: { messages: ChatMessage[]; data: any } = req.body;
-    const lastMessage = messages.pop();
-    if (!messages || !lastMessage || lastMessage.role !== "user") {
+    const userMessage = messages.pop();
+    if (!messages || !userMessage || userMessage.role !== "user") {
       return res.status(400).json({
         error:
           "messages are required in the request body and the last message must be from the user",
@@ -36,17 +36,20 @@ export const chat = async (req: Request, res: Response) => {
       model: process.env.MODEL || "gpt-3.5-turbo",
     });
 
-    const lastMessageContent = getLastMessageContent(
-      lastMessage.content,
+    // Convert message content from Vercel/AI format to LlamaIndex/OpenAI format
+    // Note: The non-streaming template does not need the Vercel/AI format, but we keep the conversion for consistency with the streaming template
+    const userMessageContent = convertMessageContent(
+      userMessage.content,
       data?.imageUrl,
     );
 
     const chatEngine = await createChatEngine(llm);
 
-    const response = await chatEngine.chat(
-      lastMessageContent as MessageContent,
+    // Calling LlamaIndex's ChatEngine to get a response
+    const response = await chatEngine.chat({
+      message: userMessageContent,
-      messages,
+      chatHistory: messages,
-    );
+    });
     const result: ChatMessage = {
       role: "assistant",
       content: response.response,
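
The body of convertMessageContent is untouched by this patch and therefore not shown. For context, here is a minimal sketch of what such a helper typically looks like, assuming the LlamaIndex MessageContent union of a plain string or a list of text/image_url details (the exact shape in the template may differ):

```ts
import { MessageContent } from "llamaindex";

// Sketch only: plain text stays a string; when an image URL is present the
// content becomes a detail list so multimodal models receive both parts.
const convertMessageContent = (
  textMessage: string,
  imageUrl: string | undefined,
): MessageContent => {
  if (!imageUrl) return textMessage;
  return [
    { type: "text", text: textMessage },
    { type: "image_url", image_url: { url: imageUrl } },
  ];
};
```
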
diff --git a/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts b/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
index 4bd1c8da6f2e502462a7bf7221ed2874d656adc1..e82658016cbfb036f31485a79d8e83a2c8fe376f 100644
--- a/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
+++ b/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
@@ -4,7 +4,7 @@ import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
 import { createChatEngine } from "./engine";
 import { LlamaIndexStream } from "./llamaindex-stream";
 
-const getLastMessageContent = (
+const convertMessageContent = (
   textMessage: string,
   imageUrl: string | undefined,
 ): MessageContent => {
@@ -26,8 +26,8 @@ const getLastMessageContent = (
 export const chat = async (req: Request, res: Response) => {
   try {
     const { messages, data }: { messages: ChatMessage[]; data: any } = req.body;
-    const lastMessage = messages.pop();
-    if (!messages || !lastMessage || lastMessage.role !== "user") {
+    const userMessage = messages.pop();
+    if (!messages || !userMessage || userMessage.role !== "user") {
       return res.status(400).json({
         error:
           "messages are required in the request body and the last message must be from the user",
@@ -40,18 +40,20 @@ export const chat = async (req: Request, res: Response) => {
 
     const chatEngine = await createChatEngine(llm);
 
-    const lastMessageContent = getLastMessageContent(
-      lastMessage.content,
+    // Convert message content from Vercel/AI format to LlamaIndex/OpenAI format
+    const userMessageContent = convertMessageContent(
+      userMessage.content,
       data?.imageUrl,
     );
 
-    const response = await chatEngine.chat(
-      lastMessageContent as MessageContent,
-      messages,
-      true,
-    );
+    // Calling LlamaIndex's ChatEngine to get a streamed response
+    const response = await chatEngine.chat({
+      message: userMessageContent,
+      chatHistory: messages,
+      stream: true,
+    });
 
-    // Transform the response into a readable stream
+    // Return a stream, which can be consumed by the Vercel/AI client
     const stream = LlamaIndexStream(response);
 
     streamToResponse(stream, res);
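
With stream: true, chat() now resolves to an async iterable of LlamaIndex Response chunks instead of a single Response. A minimal sketch of consuming it directly, assuming (as the stream parser below does) that each chunk carries its text delta in chunk.response:

```ts
// Sketch: iterate the streamed chat response without the Vercel/AI helpers.
const streamedResponse = await chatEngine.chat({
  message: "What did the author do growing up?",
  chatHistory: [],
  stream: true,
});
for await (const chunk of streamedResponse) {
  process.stdout.write(chunk.response); // chunk is a llamaindex Response
}
```
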
diff --git a/packages/create-llama/templates/types/streaming/express/src/controllers/llamaindex-stream.ts b/packages/create-llama/templates/types/streaming/express/src/controllers/llamaindex-stream.ts
index 12328de875d8a59a5501db7fbf2aa0d763a3dbaa..e86c7626f09e2beca9ce15d922a37679c9906e43 100644
--- a/packages/create-llama/templates/types/streaming/express/src/controllers/llamaindex-stream.ts
+++ b/packages/create-llama/templates/types/streaming/express/src/controllers/llamaindex-stream.ts
@@ -4,18 +4,20 @@ import {
   trimStartOfStreamHelper,
   type AIStreamCallbacksAndOptions,
 } from "ai";
+import { Response } from "llamaindex";
 
-function createParser(res: AsyncGenerator<any>) {
+function createParser(res: AsyncIterable<Response>) {
+  const it = res[Symbol.asyncIterator]();
   const trimStartOfStream = trimStartOfStreamHelper();
   return new ReadableStream<string>({
     async pull(controller): Promise<void> {
-      const { value, done } = await res.next();
+      const { value, done } = await it.next();
       if (done) {
         controller.close();
         return;
       }
 
-      const text = trimStartOfStream(value ?? "");
+      const text = trimStartOfStream(value.response ?? "");
       if (text) {
         controller.enqueue(text);
       }
@@ -24,7 +26,7 @@ function createParser(res: AsyncGenerator<any>) {
 }
 
 export function LlamaIndexStream(
-  res: AsyncGenerator<any>,
+  res: AsyncIterable<Response>,
   callbacks?: AIStreamCallbacksAndOptions,
 ): ReadableStream {
   return createParser(res)
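
An AsyncIterable only guarantees a [Symbol.asyncIterator]() method, not next(), which is why the parser now obtains an iterator up front and pulls from it. The same pattern, restated as a self-contained generic helper (a sketch, not part of the template):

```ts
// Adapt any AsyncIterable<T> into a ReadableStream<string>, given a function
// that extracts the text of each chunk. Mirrors the createParser logic above.
function iterableToTextStream<T>(
  iterable: AsyncIterable<T>,
  getText: (chunk: T) => string,
): ReadableStream<string> {
  const it = iterable[Symbol.asyncIterator]();
  return new ReadableStream<string>({
    async pull(controller) {
      const { value, done } = await it.next();
      if (done) {
        controller.close();
        return;
      }
      controller.enqueue(getText(value));
    },
  });
}

// Usage sketch: iterableToTextStream(response, (chunk) => chunk.response ?? "")
```
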
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts b/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts
index 5ac376d63d83618ee22a9dcd42286a56bfe59620..6ddd8eae68bc199188d07a0af8f27a12b2a6abb3 100644
--- a/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts
+++ b/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/llamaindex-stream.ts
@@ -6,16 +6,18 @@ import {
   trimStartOfStreamHelper,
   type AIStreamCallbacksAndOptions,
 } from "ai";
+import { Response } from "llamaindex";
 
 type ParserOptions = {
   image_url?: string;
 };
 
 function createParser(
-  res: AsyncGenerator<any>,
+  res: AsyncIterable<Response>,
   data: experimental_StreamData,
   opts?: ParserOptions,
 ) {
+  const it = res[Symbol.asyncIterator]();
   const trimStartOfStream = trimStartOfStreamHelper();
   return new ReadableStream<string>({
     start() {
@@ -33,7 +35,7 @@ function createParser(
       }
     },
     async pull(controller): Promise<void> {
-      const { value, done } = await res.next();
+      const { value, done } = await it.next();
       if (done) {
         controller.close();
         data.append({}); // send an empty image response for the assistant's message
@@ -41,7 +43,7 @@ function createParser(
         return;
       }
 
-      const text = trimStartOfStream(value ?? "");
+      const text = trimStartOfStream(value.response ?? "");
       if (text) {
         controller.enqueue(text);
       }
@@ -50,7 +52,7 @@ function createParser(
 }
 
 export function LlamaIndexStream(
-  res: AsyncGenerator<any>,
+  res: AsyncIterable<Response>,
   opts?: {
     callbacks?: AIStreamCallbacksAndOptions;
     parserOptions?: ParserOptions;
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/route.ts b/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/route.ts
index a4a9f30b7549a92d1bbfd6ea8a0a99415f733f5e..ef35bf76e427e27ea4f05460624c91017b2112b1 100644
--- a/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/route.ts
+++ b/packages/create-llama/templates/types/streaming/nextjs/app/api/chat/route.ts
@@ -1,4 +1,4 @@
-import { Message, StreamingTextResponse } from "ai";
+import { StreamingTextResponse } from "ai";
 import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
 import { NextRequest, NextResponse } from "next/server";
 import { createChatEngine } from "./engine";
@@ -7,7 +7,7 @@ import { LlamaIndexStream } from "./llamaindex-stream";
 export const runtime = "nodejs";
 export const dynamic = "force-dynamic";
 
-const getLastMessageContent = (
+const convertMessageContent = (
   textMessage: string,
   imageUrl: string | undefined,
 ): MessageContent => {
@@ -29,9 +29,9 @@ const getLastMessageContent = (
 export async function POST(request: NextRequest) {
   try {
     const body = await request.json();
-    const { messages, data }: { messages: Message[]; data: any } = body;
-    const lastMessage = messages.pop();
-    if (!messages || !lastMessage || lastMessage.role !== "user") {
+    const { messages, data }: { messages: ChatMessage[]; data: any } = body;
+    const userMessage = messages.pop();
+    if (!messages || !userMessage || userMessage.role !== "user") {
       return NextResponse.json(
         {
           error:
@@ -48,25 +48,27 @@ export async function POST(request: NextRequest) {
 
     const chatEngine = await createChatEngine(llm);
 
-    const lastMessageContent = getLastMessageContent(
-      lastMessage.content,
+    // Convert message content from Vercel/AI format to LlamaIndex/OpenAI format
+    const userMessageContent = convertMessageContent(
+      userMessage.content,
       data?.imageUrl,
     );
 
-    const response = await chatEngine.chat(
-      lastMessageContent as MessageContent,
-      messages as ChatMessage[],
-      true,
-    );
+    // Calling LlamaIndex's ChatEngine to get a streamed response
+    const response = await chatEngine.chat({
+      message: userMessageContent,
+      chatHistory: messages,
+      stream: true,
+    });
 
-    // Transform the response into a readable stream
+    // Transform the LlamaIndex stream into the Vercel/AI format
     const { stream, data: streamData } = LlamaIndexStream(response, {
       parserOptions: {
         image_url: data?.imageUrl,
       },
     });
 
-    // Return a StreamingTextResponse, which can be consumed by the client
+    // Return a StreamingTextResponse, which can be consumed by the Vercel/AI client
     return new StreamingTextResponse(stream, {}, streamData);
   } catch (error) {
     console.error("[LlamaIndex]", error);
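
For reference, a sketch of calling the route with the request shape the handler above expects (messages plus an optional data.imageUrl). This is illustrative only: because a StreamData object is attached, the response body is encoded in the Vercel/AI streaming protocol, so a real client would normally consume it through the Vercel/AI client hooks rather than decode it by hand.

```ts
// Sketch: exercise the route with plain fetch. Endpoint and payload shape
// follow the handler above; the URL and message text are made up.
const res = await fetch("http://localhost:3000/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    messages: [{ role: "user", content: "Describe this image" }],
    data: { imageUrl: "https://example.com/cat.png" },
  }),
});

const reader = res.body!.getReader();
const decoder = new TextDecoder();
for (;;) {
  const { value, done } = await reader.read();
  if (done) break;
  // Raw protocol frames; the Vercel/AI client decodes these into text + data.
  console.log(decoder.decode(value));
}
```
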