From 747bbbc73e1dd75ddf4c9d3cbf12e9567a8b538f Mon Sep 17 00:00:00 2001
From: Marcus Schiesser <mail@marcusschiesser.de>
Date: Fri, 24 Nov 2023 17:10:02 +0700
Subject: [PATCH] fix: set maxTokens to 4096 so the vision model does not stop
 generating too early (it appears to have a lower default than other models)

---
 templates/types/streaming/nextjs/app/api/chat/route.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/templates/types/streaming/nextjs/app/api/chat/route.ts b/templates/types/streaming/nextjs/app/api/chat/route.ts
index 850ab55f..eb3ddb0d 100644
--- a/templates/types/streaming/nextjs/app/api/chat/route.ts
+++ b/templates/types/streaming/nextjs/app/api/chat/route.ts
@@ -44,6 +44,7 @@ export async function POST(request: NextRequest) {
 
     const llm = new OpenAI({
       model: MODEL,
+      maxTokens: 4096,
     });
 
     const chatEngine = await createChatEngine(llm);
-- 
GitLab