Commit c15e5532 authored by thucpn, committed by Marcus Schiesser

feat: option to select model for express

parent 0576fcdc
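Summary: this commit extends model selection, previously available only for Next.js, to the Express templates. askQuestions now also prompts for a model when the framework is express, installTSTemplate writes the chosen model into a generated constants.ts, and both Express chat controllers import MODEL from that file instead of hard-coding "gpt-3.5-turbo".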
@@ -189,7 +189,7 @@ export const askQuestions = async (
     }
   }
 
-  if (program.framework === "nextjs") {
+  if (program.framework === "express" || program.framework === "nextjs") {
     if (!program.model) {
       if (ciInfo.isCI) {
         program.model = getPrefOrDefault("model");
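Outside CI the CLI presumably falls through to an interactive prompt. A minimal sketch of what that branch could look like, assuming the prompts package and an illustrative model list (neither the package use nor the choices are shown in this hunk):

import prompts from "prompts";

// Hypothetical sketch: when not running in CI, ask the user which
// OpenAI model the generated app should use. The model IDs below are
// illustrative assumptions, not taken from this commit.
const { model } = await prompts({
  type: "select",
  name: "model",
  message: "Which model would you like to use?",
  choices: [
    { title: "gpt-3.5-turbo", value: "gpt-3.5-turbo" },
    { title: "gpt-4", value: "gpt-4" },
  ],
});
program.model = model;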
@@ -176,7 +176,7 @@ const installTSTemplate = async ({
     });
   }
 
-  if (framework === "nextjs") {
+  if (framework === "nextjs" || framework === "express") {
     await fs.writeFile(
       path.join(root, "constants.ts"),
       `export const MODEL = "${model || "gpt-3.5-turbo"}";\n`,
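For a concrete picture: scaffolding with gpt-4 selected would produce a constants.ts like the one below, and the `model || "gpt-3.5-turbo"` fallback covers the case where no model was chosen.

// constants.ts as generated for a project scaffolded with model "gpt-4".
// One edit here changes the model for the whole generated app.
export const MODEL = "gpt-4";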
New file constants.ts (added for an Express template):

+export const MODEL = "gpt-3.5-turbo";

 import { NextFunction, Request, Response } from "express";
 import { ChatMessage, OpenAI } from "llamaindex";
+import { MODEL } from "../../constants";
 import { createChatEngine } from "./engine";
 
@@ -14,7 +15,7 @@ export const chat = async (req: Request, res: Response, next: NextFunction) => {
   }
 
   const llm = new OpenAI({
-    model: "gpt-3.5-turbo",
+    model: MODEL,
   });
 
   const chatEngine = await createChatEngine(llm);
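The commit does not touch ./engine, but for orientation, createChatEngine is presumably a thin wrapper that wires the injected LLM into a llamaindex chat engine; a hypothetical sketch:

import { LLM, SimpleChatEngine } from "llamaindex";

// Hypothetical sketch of ./engine (not part of this diff): build a
// chat engine around whichever LLM the controller constructed, so the
// model choice stays in one place (constants.ts).
export async function createChatEngine(llm: LLM) {
  return new SimpleChatEngine({ llm });
}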
New file constants.ts (added for the streaming Express template):

+export const MODEL = "gpt-3.5-turbo";

 import { streamToResponse } from "ai";
 import { NextFunction, Request, Response } from "express";
 import { ChatMessage, OpenAI } from "llamaindex";
+import { MODEL } from "../../constants";
 import { createChatEngine } from "./engine";
 import { LlamaIndexStream } from "./llamaindex-stream";
 
@@ -16,7 +17,7 @@ export const chat = async (req: Request, res: Response, next: NextFunction) => {
   }
 
   const llm = new OpenAI({
-    model: "gpt-3.5-turbo",
+    model: MODEL,
   });
 
   const chatEngine = await createChatEngine(llm);
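Downstream of the changed lines, the streaming variant presumably pipes the engine's output through the template's LlamaIndexStream helper and the Vercel AI SDK's streamToResponse; a hypothetical sketch of that tail of the handler (the variable names and chat() signature are assumptions, not shown in this diff):

// Hypothetical continuation of the streaming handler: request a
// streaming chat response from the engine, adapt it with the
// template's LlamaIndexStream helper, and pipe it to the Express
// response.
const response = await chatEngine.chat(lastMessage.content, messages, true);
const stream = LlamaIndexStream(response);
streamToResponse(stream, res);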