From 314089b0a163daf9ec5f3bd5a91eb58e22815764 Mon Sep 17 00:00:00 2001
From: thucpn <thucsh2@gmail.com>
Date: Mon, 11 Dec 2023 13:57:59 +0700
Subject: [PATCH] feat: add option to select model for express templates

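Previously only the Next.js template asked which OpenAI model to use.
Extend the same question to the Express framework: write the selected
model to a generated constants.ts at the project root and import it in
both the simple and streaming chat controllers instead of hard-coding
"gpt-3.5-turbo". The checked-in constants.ts files keep "gpt-3.5-turbo"
as the default when no model is selected.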
---
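Note: during install, templates/index.ts overwrites the checked-in
constants.ts with the model chosen at the prompt. For a hypothetical
selection of "gpt-4" (the available choices come from the model
question, which is not shown in this diff), the generated file would be:

    export const MODEL = "gpt-4";
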
 packages/create-llama/questions.ts                             | 2 +-
 packages/create-llama/templates/index.ts                       | 2 +-
 .../create-llama/templates/types/simple/express/constants.ts   | 1 +
 .../types/simple/express/src/controllers/chat.controller.ts    | 3 ++-
 .../templates/types/streaming/express/constants.ts             | 1 +
 .../types/streaming/express/src/controllers/chat.controller.ts | 3 ++-
 6 files changed, 8 insertions(+), 4 deletions(-)
 create mode 100644 packages/create-llama/templates/types/simple/express/constants.ts
 create mode 100644 packages/create-llama/templates/types/streaming/express/constants.ts

diff --git a/packages/create-llama/questions.ts b/packages/create-llama/questions.ts
index c3f5e696d..5b0f098ad 100644
--- a/packages/create-llama/questions.ts
+++ b/packages/create-llama/questions.ts
@@ -189,7 +189,7 @@ export const askQuestions = async (
     }
   }
 
-  if (program.framework === "nextjs") {
+  if (program.framework === "express" || program.framework === "nextjs") {
     if (!program.model) {
       if (ciInfo.isCI) {
         program.model = getPrefOrDefault("model");
diff --git a/packages/create-llama/templates/index.ts b/packages/create-llama/templates/index.ts
index 79746b62d..300078768 100644
--- a/packages/create-llama/templates/index.ts
+++ b/packages/create-llama/templates/index.ts
@@ -176,7 +176,7 @@ const installTSTemplate = async ({
     });
   }
 
-  if (framework === "nextjs") {
+  if (framework === "nextjs" || framework === "express") {
     await fs.writeFile(
       path.join(root, "constants.ts"),
       `export const MODEL = "${model || "gpt-3.5-turbo"}";\n`,
diff --git a/packages/create-llama/templates/types/simple/express/constants.ts b/packages/create-llama/templates/types/simple/express/constants.ts
new file mode 100644
index 000000000..908949251
--- /dev/null
+++ b/packages/create-llama/templates/types/simple/express/constants.ts
@@ -0,0 +1 @@
+export const MODEL = "gpt-3.5-turbo";
diff --git a/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts b/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
index dd4532ffd..6612971ac 100644
--- a/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
+++ b/packages/create-llama/templates/types/simple/express/src/controllers/chat.controller.ts
@@ -1,5 +1,6 @@
 import { NextFunction, Request, Response } from "express";
 import { ChatMessage, OpenAI } from "llamaindex";
+import { MODEL } from "../../constants";
 import { createChatEngine } from "./engine";
 
 export const chat = async (req: Request, res: Response, next: NextFunction) => {
@@ -14,7 +15,7 @@ export const chat = async (req: Request, res: Response, next: NextFunction) => {
     }
 
     const llm = new OpenAI({
-      model: "gpt-3.5-turbo",
+      model: MODEL,
     });
 
     const chatEngine = await createChatEngine(llm);
diff --git a/packages/create-llama/templates/types/streaming/express/constants.ts b/packages/create-llama/templates/types/streaming/express/constants.ts
new file mode 100644
index 000000000..908949251
--- /dev/null
+++ b/packages/create-llama/templates/types/streaming/express/constants.ts
@@ -0,0 +1 @@
+export const MODEL = "gpt-3.5-turbo";
diff --git a/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts b/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
index efa054796..76b8fafdb 100644
--- a/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
+++ b/packages/create-llama/templates/types/streaming/express/src/controllers/chat.controller.ts
@@ -1,6 +1,7 @@
 import { streamToResponse } from "ai";
 import { NextFunction, Request, Response } from "express";
 import { ChatMessage, OpenAI } from "llamaindex";
+import { MODEL } from "../../constants";
 import { createChatEngine } from "./engine";
 import { LlamaIndexStream } from "./llamaindex-stream";
 
@@ -16,7 +17,7 @@ export const chat = async (req: Request, res: Response, next: NextFunction) => {
     }
 
     const llm = new OpenAI({
-      model: "gpt-3.5-turbo",
+      model: MODEL,
     });
 
     const chatEngine = await createChatEngine(llm);
-- 
GitLab