From 81cf59de0bd0b4fc0f4dc9151ff142bafc2bf5a8 Mon Sep 17 00:00:00 2001
From: thucpn <thucsh2@gmail.com>
Date: Fri, 22 Dec 2023 16:27:39 +0700
Subject: [PATCH] feat: remove constants.ts from ts templates, read model from env
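
The model name is no longer baked into a generated constants.ts file
in each template. Instead, createEnvLocalFile now takes an options
object (openAiKey, vectorDb, model) and writes the model into the
generated .env file, and the templates read it from process.env at
runtime. NEXT_PUBLIC_MODEL is written alongside MODEL because Next.js
only exposes NEXT_PUBLIC_-prefixed variables to client components such
as chat-section.tsx, which uses it for the multiModal check.

With the default model, the generated .env now begins with:

    NEXT_PUBLIC_MODEL=gpt-3.5-turbo
    MODEL=gpt-3.5-turbo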

---
 templates/index.ts                            | 27 ++++++++++++-------
 templates/types/simple/express/constants.ts   |  1 -
 .../src/controllers/chat.controller.ts        |  3 +--
 .../types/streaming/express/constants.ts      |  1 -
 .../src/controllers/chat.controller.ts        |  3 +--
 .../streaming/nextjs/app/api/chat/route.ts    |  3 +--
 .../nextjs/app/components/chat-section.tsx    |  3 +--
 templates/types/streaming/nextjs/constants.ts |  1 -
 8 files changed, 21 insertions(+), 21 deletions(-)
 delete mode 100644 templates/types/simple/express/constants.ts
 delete mode 100644 templates/types/streaming/express/constants.ts
 delete mode 100644 templates/types/streaming/nextjs/constants.ts

diff --git a/templates/index.ts b/templates/index.ts
index 8392a7b0..615214b1 100644
--- a/templates/index.ts
+++ b/templates/index.ts
@@ -19,17 +19,24 @@ import {
 
 const createEnvLocalFile = async (
   root: string,
-  openAiKey?: string,
-  vectorDb?: TemplateVectorDB,
+  opts?: {
+    openAiKey?: string;
+    vectorDb?: TemplateVectorDB;
+    model?: string;
+  },
 ) => {
   const envFileName = ".env";
   let content = "";
 
-  if (openAiKey) {
-    content += `OPENAI_API_KEY=${openAiKey}\n`;
+  const model = opts?.model || "gpt-3.5-turbo";
+  content += `NEXT_PUBLIC_MODEL=${model}\n`;
+  content += `MODEL=${model}\n`;
+
+  if (opts?.openAiKey) {
+    content += `OPENAI_API_KEY=${opts?.openAiKey}\n`;
   }
 
-  switch (vectorDb) {
+  switch (opts?.vectorDb) {
     case "mongo": {
       content += `MONGODB_URI=\n`;
       content += `MONGODB_DATABASE=\n`;
@@ -205,10 +212,6 @@ const installTSTemplate = async ({
   }
 
   if (framework === "nextjs" || framework === "express") {
-    await fs.writeFile(
-      path.join(root, "constants.ts"),
-      `export const MODEL = "${model || "gpt-3.5-turbo"}";\n`,
-    );
     console.log("\nUsing OpenAI model: ", model || "gpt-3.5-turbo", "\n");
   }
 
@@ -369,7 +372,11 @@ export const installTemplate = async (
     // This is a backend, so we need to copy the test data and create the env file.
 
     // Copy the environment file to the target directory.
-    await createEnvLocalFile(props.root, props.openAiKey, props.vectorDb);
+    await createEnvLocalFile(props.root, {
+      openAiKey: props.openAiKey,
+      vectorDb: props.vectorDb,
+      model: props.model,
+    });
 
     // Copy test pdf file
     await copyTestData(
diff --git a/templates/types/simple/express/constants.ts b/templates/types/simple/express/constants.ts
deleted file mode 100644
index 90894925..00000000
--- a/templates/types/simple/express/constants.ts
+++ /dev/null
@@ -1 +0,0 @@
-export const MODEL = "gpt-3.5-turbo";
diff --git a/templates/types/simple/express/src/controllers/chat.controller.ts b/templates/types/simple/express/src/controllers/chat.controller.ts
index 8aa08613..63a1b1dd 100644
--- a/templates/types/simple/express/src/controllers/chat.controller.ts
+++ b/templates/types/simple/express/src/controllers/chat.controller.ts
@@ -1,6 +1,5 @@
 import { Request, Response } from "express";
 import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
-import { MODEL } from "../../constants";
 import { createChatEngine } from "./engine";
 
 const getLastMessageContent = (
@@ -34,7 +33,7 @@ export const chat = async (req: Request, res: Response) => {
     }
 
     const llm = new OpenAI({
-      model: MODEL,
+      model: process.env.MODEL,
     });
 
     const lastMessageContent = getLastMessageContent(
diff --git a/templates/types/streaming/express/constants.ts b/templates/types/streaming/express/constants.ts
deleted file mode 100644
index 90894925..00000000
--- a/templates/types/streaming/express/constants.ts
+++ /dev/null
@@ -1 +0,0 @@
-export const MODEL = "gpt-3.5-turbo";
diff --git a/templates/types/streaming/express/src/controllers/chat.controller.ts b/templates/types/streaming/express/src/controllers/chat.controller.ts
index 1dbd85d4..f1f50b48 100644
--- a/templates/types/streaming/express/src/controllers/chat.controller.ts
+++ b/templates/types/streaming/express/src/controllers/chat.controller.ts
@@ -1,7 +1,6 @@
 import { streamToResponse } from "ai";
 import { Request, Response } from "express";
 import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
-import { MODEL } from "../../constants";
 import { createChatEngine } from "./engine";
 import { LlamaIndexStream } from "./llamaindex-stream";
 
@@ -36,7 +35,7 @@ export const chat = async (req: Request, res: Response) => {
     }
 
     const llm = new OpenAI({
-      model: MODEL,
+      model: process.env.MODEL,
     });
 
     const chatEngine = await createChatEngine(llm);
diff --git a/templates/types/streaming/nextjs/app/api/chat/route.ts b/templates/types/streaming/nextjs/app/api/chat/route.ts
index 99fea0f6..b83855bc 100644
--- a/templates/types/streaming/nextjs/app/api/chat/route.ts
+++ b/templates/types/streaming/nextjs/app/api/chat/route.ts
@@ -1,4 +1,3 @@
-import { MODEL } from "@/constants";
 import { Message, StreamingTextResponse } from "ai";
 import { MessageContent, OpenAI } from "llamaindex";
 import { NextRequest, NextResponse } from "next/server";
@@ -43,7 +42,7 @@ export async function POST(request: NextRequest) {
     }
 
     const llm = new OpenAI({
-      model: MODEL,
+      model: process.env.MODEL,
       maxTokens: 512,
     });
 
diff --git a/templates/types/streaming/nextjs/app/components/chat-section.tsx b/templates/types/streaming/nextjs/app/components/chat-section.tsx
index b42edb27..e51eeef3 100644
--- a/templates/types/streaming/nextjs/app/components/chat-section.tsx
+++ b/templates/types/streaming/nextjs/app/components/chat-section.tsx
@@ -1,6 +1,5 @@
 "use client";
 
-import { MODEL } from "@/constants";
 import { useChat } from "ai/react";
 import { ChatInput, ChatMessages } from "./ui/chat";
 
@@ -33,7 +32,7 @@ export default function ChatSection() {
         handleSubmit={handleSubmit}
         handleInputChange={handleInputChange}
         isLoading={isLoading}
-        multiModal={MODEL === "gpt-4-vision-preview"}
+        multiModal={process.env.NEXT_PUBLIC_MODEL === "gpt-4-vision-preview"}
       />
     </div>
   );
diff --git a/templates/types/streaming/nextjs/constants.ts b/templates/types/streaming/nextjs/constants.ts
deleted file mode 100644
index 0959a5f6..00000000
--- a/templates/types/streaming/nextjs/constants.ts
+++ /dev/null
@@ -1 +0,0 @@
-export const MODEL = "gpt-4-vision-preview";
-- 
GitLab