diff --git a/templates/components/engines/python/chat/__init__.py b/templates/components/engines/python/chat/__init__.py
index cec9725632a1b2199ed729a1aa950bf770e3bbad..80bcc41a571cad303a1f562e4b2c57cc2c6b0b07 100644
--- a/templates/components/engines/python/chat/__init__.py
+++ b/templates/components/engines/python/chat/__init__.py
@@ -1,5 +1,6 @@
 import os
 from app.engine.index import get_index
+from fastapi import HTTPException
 
 
 def get_chat_engine():
@@ -8,8 +9,11 @@ def get_chat_engine():
 
     index = get_index()
     if index is None:
-        raise Exception(
-            "StorageContext is empty - call 'poetry run generate' to generate the storage first"
+        raise HTTPException(
+            status_code=500,
+            detail=(
+                "StorageContext is empty - call 'poetry run generate' to generate the storage first"
+            ),
         )
 
     return index.as_chat_engine(
diff --git a/templates/types/streaming/express/src/controllers/chat-request.controller.ts b/templates/types/streaming/express/src/controllers/chat-request.controller.ts
index 056edc5d615e452301980be9e08f5b08a57c8d08..117713fb2d53e2b317e9a0eec8116fa088bc73dd 100644
--- a/templates/types/streaming/express/src/controllers/chat-request.controller.ts
+++ b/templates/types/streaming/express/src/controllers/chat-request.controller.ts
@@ -57,7 +57,7 @@ export const chatRequest = async (req: Request, res: Response) => {
   } catch (error) {
     console.error("[LlamaIndex]", error);
     return res.status(500).json({
-      error: (error as Error).message,
+      detail: (error as Error).message,
     });
   }
 };
diff --git a/templates/types/streaming/express/src/controllers/chat.controller.ts b/templates/types/streaming/express/src/controllers/chat.controller.ts
index 7bdf98da61a69b1550d05fa363f174df22050e5e..f200628a1b4e3e0ac08acb1cefa46e5ad5551531 100644
--- a/templates/types/streaming/express/src/controllers/chat.controller.ts
+++ b/templates/types/streaming/express/src/controllers/chat.controller.ts
@@ -71,7 +71,7 @@ export const chat = async (req: Request, res: Response) => {
   } catch (error) {
     console.error("[LlamaIndex]", error);
     return res.status(500).json({
-      error: (error as Error).message,
+      detail: (error as Error).message,
     });
   }
 };
diff --git a/templates/types/streaming/nextjs/app/api/chat/route.ts b/templates/types/streaming/nextjs/app/api/chat/route.ts
index 92e874bb0daca21518b581e2d67a7828b78c2b6b..f836cbed19d04b81c15215b1dbe409e98c68d1a6 100644
--- a/templates/types/streaming/nextjs/app/api/chat/route.ts
+++ b/templates/types/streaming/nextjs/app/api/chat/route.ts
@@ -74,7 +74,7 @@ export async function POST(request: NextRequest) {
     console.error("[LlamaIndex]", error);
     return NextResponse.json(
       {
-        error: (error as Error).message,
+        detail: (error as Error).message,
       },
       {
         status: 500,
diff --git a/templates/types/streaming/nextjs/app/components/chat-section.tsx b/templates/types/streaming/nextjs/app/components/chat-section.tsx
index ceb31c9e2f4cf8d1145113113a5e1987284aaac8..e98607462c10173c9dfa77479288944c2efdf748 100644
--- a/templates/types/streaming/nextjs/app/components/chat-section.tsx
+++ b/templates/types/streaming/nextjs/app/components/chat-section.tsx
@@ -17,6 +17,15 @@ export default function ChatSection() {
    headers: {
      "Content-Type": "application/json", // using JSON because of vercel/ai 2.2.26
    },
+    onError: (error) => {
+      let detail = error.message;
+      try {
+        // API routes return a JSON body of shape { detail: string }
+        detail = JSON.parse(error.message).detail;
+      } catch {
+        // message was not JSON (e.g. a network-level failure) — show it as-is
+      }
+      alert(detail);
+    },
   });
 
   return (