diff --git a/templates/index.ts b/templates/index.ts
index 8392a7b07aa3aed2f30081400b4c64e544dc2c73..615214b1c32b847d01c6ddf4eb42024cbac360bb 100644
--- a/templates/index.ts
+++ b/templates/index.ts
@@ -19,17 +19,24 @@ import {
 
 const createEnvLocalFile = async (
   root: string,
-  openAiKey?: string,
-  vectorDb?: TemplateVectorDB,
+  opts?: {
+    openAiKey?: string;
+    vectorDb?: TemplateVectorDB;
+    model?: string;
+  },
 ) => {
   const envFileName = ".env";
   let content = "";
 
-  if (openAiKey) {
-    content += `OPENAI_API_KEY=${openAiKey}\n`;
+  const model = opts?.model || "gpt-3.5-turbo";
+  content += `NEXT_PUBLIC_MODEL=${model}\n`;
+  content += `MODEL=${model}\n`;
+
+  if (opts?.openAiKey) {
+    content += `OPENAI_API_KEY=${opts?.openAiKey}\n`;
   }
 
-  switch (vectorDb) {
+  switch (opts?.vectorDb) {
     case "mongo": {
       content += `MONGODB_URI=\n`;
       content += `MONGODB_DATABASE=\n`;
@@ -205,10 +212,6 @@ const installTSTemplate = async ({
   }
 
   if (framework === "nextjs" || framework === "express") {
-    await fs.writeFile(
-      path.join(root, "constants.ts"),
-      `export const MODEL = "${model || "gpt-3.5-turbo"}";\n`,
-    );
     console.log("\nUsing OpenAI model: ", model || "gpt-3.5-turbo", "\n");
   }
 
@@ -369,7 +372,11 @@ export const installTemplate = async (
     // This is a backend, so we need to copy the test data and create the env file.
 
     // Copy the environment file to the target directory.
-    await createEnvLocalFile(props.root, props.openAiKey, props.vectorDb);
+    await createEnvLocalFile(props.root, {
+      openAiKey: props.openAiKey,
+      vectorDb: props.vectorDb,
+      model: props.model,
+    });
 
     // Copy test pdf file
     await copyTestData(
diff --git a/templates/types/simple/express/constants.ts b/templates/types/simple/express/constants.ts
deleted file mode 100644
index 908949251c708651be6d0ae6b09c2982e65b0a1a..0000000000000000000000000000000000000000
--- a/templates/types/simple/express/constants.ts
+++ /dev/null
@@ -1 +0,0 @@
-export const MODEL = "gpt-3.5-turbo";
diff --git a/templates/types/simple/express/src/controllers/chat.controller.ts b/templates/types/simple/express/src/controllers/chat.controller.ts
index 8aa08613f4601f2b142c065028a22e9ef2d4ea74..63a1b1ddd074de40f5bb12ef7e5823949904c3a9 100644
--- a/templates/types/simple/express/src/controllers/chat.controller.ts
+++ b/templates/types/simple/express/src/controllers/chat.controller.ts
@@ -1,6 +1,5 @@
 import { Request, Response } from "express";
 import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
-import { MODEL } from "../../constants";
 import { createChatEngine } from "./engine";
 
 const getLastMessageContent = (
@@ -34,7 +33,7 @@ export const chat = async (req: Request, res: Response) => {
   }
 
   const llm = new OpenAI({
-    model: MODEL,
+    model: process.env.MODEL,
   });
 
   const lastMessageContent = getLastMessageContent(
diff --git a/templates/types/streaming/express/constants.ts b/templates/types/streaming/express/constants.ts
deleted file mode 100644
index 908949251c708651be6d0ae6b09c2982e65b0a1a..0000000000000000000000000000000000000000
--- a/templates/types/streaming/express/constants.ts
+++ /dev/null
@@ -1 +0,0 @@
-export const MODEL = "gpt-3.5-turbo";
diff --git a/templates/types/streaming/express/src/controllers/chat.controller.ts b/templates/types/streaming/express/src/controllers/chat.controller.ts
index 1dbd85d45f157bf5e98741e512826f27e420a15a..f1f50b48be68ab06c9431fe789cee695b0f54f5f 100644
--- a/templates/types/streaming/express/src/controllers/chat.controller.ts
+++ b/templates/types/streaming/express/src/controllers/chat.controller.ts
@@ -1,7 +1,6 @@
 import { streamToResponse } from "ai";
 import { Request, Response } from "express";
 import { ChatMessage, MessageContent, OpenAI } from "llamaindex";
-import { MODEL } from "../../constants";
 import { createChatEngine } from "./engine";
 import { LlamaIndexStream } from "./llamaindex-stream";
 
@@ -36,7 +35,7 @@ export const chat = async (req: Request, res: Response) => {
   }
 
   const llm = new OpenAI({
-    model: MODEL,
+    model: process.env.MODEL,
   });
 
   const chatEngine = await createChatEngine(llm);
diff --git a/templates/types/streaming/nextjs/app/api/chat/route.ts b/templates/types/streaming/nextjs/app/api/chat/route.ts
index 99fea0f618eb9b007ba0b7f8349522957e15ed92..b83855bc1b43fa4159e0a2fd98ade21d8064fbfa 100644
--- a/templates/types/streaming/nextjs/app/api/chat/route.ts
+++ b/templates/types/streaming/nextjs/app/api/chat/route.ts
@@ -1,4 +1,3 @@
-import { MODEL } from "@/constants";
 import { Message, StreamingTextResponse } from "ai";
 import { MessageContent, OpenAI } from "llamaindex";
 import { NextRequest, NextResponse } from "next/server";
@@ -43,7 +42,7 @@ export async function POST(request: NextRequest) {
   }
 
   const llm = new OpenAI({
-    model: MODEL,
+    model: process.env.MODEL,
     maxTokens: 512,
   });
 
diff --git a/templates/types/streaming/nextjs/app/components/chat-section.tsx b/templates/types/streaming/nextjs/app/components/chat-section.tsx
index b42edb2731f33a4394027569416b26f214c90c13..e51eeef329741f7833b3119f9f44c6378921bb1d 100644
--- a/templates/types/streaming/nextjs/app/components/chat-section.tsx
+++ b/templates/types/streaming/nextjs/app/components/chat-section.tsx
@@ -1,6 +1,5 @@
 "use client";
 
-import { MODEL } from "@/constants";
 import { useChat } from "ai/react";
 import { ChatInput, ChatMessages } from "./ui/chat";
 
@@ -33,7 +32,7 @@ export default function ChatSection() {
         handleSubmit={handleSubmit}
         handleInputChange={handleInputChange}
         isLoading={isLoading}
-        multiModal={MODEL === "gpt-4-vision-preview"}
+        multiModal={process.env.NEXT_PUBLIC_MODEL === "gpt-4-vision-preview"}
       />
     </div>
   );
diff --git a/templates/types/streaming/nextjs/constants.ts b/templates/types/streaming/nextjs/constants.ts
deleted file mode 100644
index 0959a5f6f4f9301f2559cf5b04c36de0ef3afe4a..0000000000000000000000000000000000000000
--- a/templates/types/streaming/nextjs/constants.ts
+++ /dev/null
@@ -1 +0,0 @@
-export const MODEL = "gpt-4-vision-preview";
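
Note: the patch above moves the model name from a generated constants.ts into environment
variables, so it can be changed without regenerating the project. NEXT_PUBLIC_MODEL is
written alongside MODEL because Next.js only exposes NEXT_PUBLIC_-prefixed variables to
client components such as chat-section.tsx; server code reads plain MODEL. A minimal
sketch of the new call site and its output, assuming the defaults in the diff (the
project path and key value below are illustrative placeholders, not part of this change):

    // Hypothetical usage of the options-object signature from templates/index.ts.
    await createEnvLocalFile("./my-app", {
      openAiKey: "sk-...", // illustrative placeholder, not a real key
      vectorDb: "mongo",
      model: "gpt-4-vision-preview",
    });

    // Expected ./my-app/.env contents (sketch; the "mongo" case may append
    // further variables beyond the two lines visible in the first hunk):
    // NEXT_PUBLIC_MODEL=gpt-4-vision-preview
    // MODEL=gpt-4-vision-preview
    // OPENAI_API_KEY=sk-...
    // MONGODB_URI=
    // MONGODB_DATABASE=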