From 2f7b05006bcf1d7f496b91b6fe08c5e61fb1597f Mon Sep 17 00:00:00 2001
From: Thuc Pham <51660321+thucpn@users.noreply.github.com>
Date: Thu, 14 Dec 2023 11:41:43 +0700
Subject: [PATCH] Fix/express app can not parse request json body (#17)
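
The Express templates never registered a JSON body parser, so the chat
controllers failed when calling JSON.parse(req.body) on an undefined body.
This patch registers express.json() in both Express templates, reads the
already-parsed req.body directly in the chat controllers, builds a multimodal
MessageContent when the client sends an imageUrl in data, and has the Next.js
chat client send an explicit "Content-Type: application/json" header.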

---
 .../templates/types/simple/express/index.ts   |  2 +
 .../src/controllers/chat.controller.ts        | 37 +++++++++++++++---
 .../types/streaming/express/index.ts          |  2 +
 .../src/controllers/chat.controller.ts        | 38 ++++++++++++++++---
 .../nextjs/app/components/chat-section.tsx    |  7 +++-
 5 files changed, 75 insertions(+), 11 deletions(-)

diff --git a/packages/create-llama/templates/types/simple/express/index.ts b/packages/create-llama/templates/types/simple/express/index.ts
index 830c549f5..721c4ec9d 100644
--- a/packages/create-llama/templates/types/simple/express/index.ts
+++ b/packages/create-llama/templates/types/simple/express/index.ts
@@ -11,6 +11,8 @@ const env = process.env["NODE_ENV"];
 const isDevelopment = !env || env === "development";
 const prodCorsOrigin = process.env["PROD_CORS_ORIGIN"];
 
+app.use(express.json());
+
 if (isDevelopment) {
   console.warn("Running in development mode - allowing CORS for all origins");
   app.use(cors());
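
[Note: the sketch below is not part of the patch; it only illustrates why the
middleware matters. The route path and port are assumptions.] Without a
body-parsing middleware, Express leaves req.body undefined for
application/json requests, which is why the old JSON.parse(req.body) call in
the controllers threw. Registering express.json() before the routes makes
req.body a parsed object:

// Illustrative sketch only (not part of this patch); route and port are assumptions.
import express from "express";

const app = express();

// Parse "Content-Type: application/json" request bodies into req.body.
app.use(express.json());

app.post("/api/chat", (req, res) => {
  // req.body is already a plain object here; no JSON.parse needed.
  const { messages } = req.body;
  res.json({ received: Array.isArray(messages) ? messages.length : 0 });
});

app.listen(8000);
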
diff --git a/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts b/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
index 6612971ac..8aa08613f 100644
--- a/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
+++ b/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
@@ -1,11 +1,30 @@
-import { NextFunction, Request, Response } from "express";
-import { ChatMessage, OpenAI } from "llamaindex";
+import { Request, Response } from "express";
+import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
 import { MODEL } from "../../constants";
 import { createChatEngine } from "./engine";
 
-export const chat = async (req: Request, res: Response, next: NextFunction) => {
+const getLastMessageContent = (
+  textMessage: string,
+  imageUrl: string | undefined,
+): MessageContent => {
+  if (!imageUrl) return textMessage;
+  return [
+    {
+      type: "text",
+      text: textMessage,
+    },
+    {
+      type: "image_url",
+      image_url: {
+        url: imageUrl,
+      },
+    },
+  ];
+};
+
+export const chat = async (req: Request, res: Response) => {
   try {
-    const { messages }: { messages: ChatMessage[] } = JSON.parse(req.body);
+    const { messages, data }: { messages: ChatMessage[]; data: any } = req.body;
     const lastMessage = messages.pop();
     if (!messages || !lastMessage || lastMessage.role !== "user") {
       return res.status(400).json({
@@ -18,9 +37,17 @@ export const chat = async (req: Request, res: Response, next: NextFunction) => {
       model: MODEL,
     });
 
+    const lastMessageContent = getLastMessageContent(
+      lastMessage.content,
+      data?.imageUrl,
+    );
+
     const chatEngine = await createChatEngine(llm);
 
-    const response = await chatEngine.chat(lastMessage.content, messages);
+    const response = await chatEngine.chat(
+      lastMessageContent as MessageContent,
+      messages,
+    );
     const result: ChatMessage = {
       role: "assistant",
       content: response.response,
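
[Note: hypothetical request sketch, not part of the patch; the URL, port, and
route are assumptions.] With express.json() in place, the controller reads
messages and data straight from req.body. When data.imageUrl is present,
getLastMessageContent turns the last user message into a multimodal
MessageContent with a text part and an image_url part; otherwise it stays a
plain string. A request exercising the image path could look like this:

// Hypothetical client call -- URL and payload values are placeholders.
const res = await fetch("http://localhost:8000/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    messages: [{ role: "user", content: "What is in this picture?" }],
    // Optional: when present, the controller builds the multimodal content.
    data: { imageUrl: "https://example.com/photo.png" },
  }),
});
console.log(await res.json());
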
diff --git a/packages/create-llama/templates/types/streaming/express/index.ts b/packages/create-llama/templates/types/streaming/express/index.ts
index 830c549f5..721c4ec9d 100644
--- a/packages/create-llama/templates/types/streaming/express/index.ts
+++ b/packages/create-llama/templates/types/streaming/express/index.ts
@@ -11,6 +11,8 @@ const env = process.env["NODE_ENV"];
 const isDevelopment = !env || env === "development";
 const prodCorsOrigin = process.env["PROD_CORS_ORIGIN"];
 
+app.use(express.json());
+
 if (isDevelopment) {
   console.warn("Running in development mode - allowing CORS for all origins");
   app.use(cors());
diff --git a/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts b/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
index 76b8fafdb..1dbd85d45 100644
--- a/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
+++ b/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
@@ -1,13 +1,32 @@
 import { streamToResponse } from "ai";
-import { NextFunction, Request, Response } from "express";
-import { ChatMessage, OpenAI } from "llamaindex";
+import { Request, Response } from "express";
+import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
 import { MODEL } from "../../constants";
 import { createChatEngine } from "./engine";
 import { LlamaIndexStream } from "./llamaindex-stream";
 
-export const chat = async (req: Request, res: Response, next: NextFunction) => {
+const getLastMessageContent = (
+  textMessage: string,
+  imageUrl: string | undefined,
+): MessageContent => {
+  if (!imageUrl) return textMessage;
+  return [
+    {
+      type: "text",
+      text: textMessage,
+    },
+    {
+      type: "image_url",
+      image_url: {
+        url: imageUrl,
+      },
+    },
+  ];
+};
+
+export const chat = async (req: Request, res: Response) => {
   try {
-    const { messages }: { messages: ChatMessage[] } = JSON.parse(req.body);
+    const { messages, data }: { messages: ChatMessage[]; data: any } = req.body;
     const lastMessage = messages.pop();
     if (!messages || !lastMessage || lastMessage.role !== "user") {
       return res.status(400).json({
@@ -22,7 +41,16 @@ export const chat = async (req: Request, res: Response, next: NextFunction) => {
 
     const chatEngine = await createChatEngine(llm);
 
-    const response = await chatEngine.chat(lastMessage.content, messages, true);
+    const lastMessageContent = getLastMessageContent(
+      lastMessage.content,
+      data?.imageUrl,
+    );
+
+    const response = await chatEngine.chat(
+      lastMessageContent as MessageContent,
+      messages,
+      true,
+    );
 
     // Transform the response into a readable stream
     const stream = LlamaIndexStream(response);
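
[Note: illustrative consumer sketch, not part of the patch; the endpoint is an
assumption.] The streaming controller makes the same change, but calls
chatEngine.chat with streaming enabled and pipes the result through
LlamaIndexStream and streamToResponse, so a client can read tokens as they
arrive:

// Hypothetical client-side reader for the streamed response (Node 18+ fetch).
const res = await fetch("http://localhost:8000/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    messages: [{ role: "user", content: "Tell me a short story" }],
  }),
});

const reader = res.body!.getReader();
const decoder = new TextDecoder();
for (;;) {
  const { done, value } = await reader.read();
  if (done) break;
  process.stdout.write(decoder.decode(value)); // print chunks as they stream in
}
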
diff --git a/packages/create-llama/templates/types/streaming/nextjs/app/components/chat-section.tsx b/packages/create-llama/templates/types/streaming/nextjs/app/components/chat-section.tsx
index 791b223f4..b42edb273 100644
--- a/packages/create-llama/templates/types/streaming/nextjs/app/components/chat-section.tsx
+++ b/packages/create-llama/templates/types/streaming/nextjs/app/components/chat-section.tsx
@@ -13,7 +13,12 @@ export default function ChatSection() {
     handleInputChange,
     reload,
     stop,
-  } = useChat({ api: process.env.NEXT_PUBLIC_CHAT_API });
+  } = useChat({
+    api: process.env.NEXT_PUBLIC_CHAT_API,
+    headers: {
+      "Content-Type": "application/json", // using JSON because of vercel/ai 2.2.26
+    },
+  });
 
   return (
     <div className="space-y-4 max-w-5xl w-full">
-- 
GitLab