From b8345f84b017c80b210c497d57daa1d89cf97f12 Mon Sep 17 00:00:00 2001
From: Marcus Schiesser <mail@marcusschiesser.de>
Date: Fri, 22 Dec 2023 12:49:13 +0700
Subject: [PATCH] feat: add /api/chat e2e test (uses openai key) (#287)

* feat: allow custom external port
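
Example (values are illustrative): scaffold a fullstack app whose
frontend targets the backend on a custom external port:

    npx create-llama --frontend --external-port 8001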

---------

Co-authored-by: thucpn <thucsh2@gmail.com>
---
 create-app.ts      |  4 +-
 e2e/basic.spec.ts  | 96 ++++++++++++++++++++++++++++++++++++----------
 e2e/utils.ts       |  8 +++-
 index.ts           | 10 ++++-
 templates/types.ts |  1 +
 5 files changed, 94 insertions(+), 25 deletions(-)

diff --git a/create-app.ts b/create-app.ts
index d835af8a..2e7f4c6e 100644
--- a/create-app.ts
+++ b/create-app.ts
@@ -33,6 +33,7 @@ export async function createApp({
   model,
   communityProjectPath,
   vectorDb,
+  externalPort,
 }: InstallAppArgs): Promise<void> {
   const root = path.resolve(appPath);
 
@@ -73,6 +74,7 @@ export async function createApp({
     model,
     communityProjectPath,
     vectorDb,
+    externalPort,
   };
 
   if (frontend) {
@@ -87,7 +89,7 @@ export async function createApp({
       ...args,
       root: frontendRoot,
       framework: "nextjs",
-      customApiPath: "http://localhost:8000/api/chat",
+      customApiPath: `http://localhost:${externalPort ?? 8000}/api/chat`,
       backend: false,
     });
     // copy readme for fullstack
diff --git a/e2e/basic.spec.ts b/e2e/basic.spec.ts
index dbc44114..1a0da5af 100644
--- a/e2e/basic.spec.ts
+++ b/e2e/basic.spec.ts
@@ -1,5 +1,6 @@
 /* eslint-disable turbo/no-undeclared-env-vars */
 import { expect, test } from "@playwright/test";
+import { ChildProcess } from "child_process";
 import type {
   TemplateEngine,
   TemplateFramework,
@@ -31,30 +32,83 @@ for (const templateType of templateTypes) {
               ? "--no-frontend" // simple templates don't have frontends
               : "--frontend"
             : "";
-        test(`try create-llama ${templateType} ${templateFramework} ${templateEngine} ${templateUI} ${appType}`, async ({
-          page,
-        }) => {
-          const cwd = await createTestDir();
-          const name = runCreateLlama(
-            cwd,
-            templateType,
-            templateFramework,
-            templateEngine,
-            templateUI,
-            appType,
-          );
-
-          const port = Math.floor(Math.random() * 10000) + 10000;
-          const cps = await runApp(cwd, name, appType, port);
-
-          // test frontend
-          if (appType !== "--no-frontend") {
+        test.describe(`try create-llama ${templateType} ${templateFramework} ${templateEngine} ${templateUI} ${appType}`, () => {
+          let port: number;
+          let externalPort: number;
+          let cwd: string;
+          let name: string;
+          let cps: ChildProcess[];
+
+          test.beforeAll(async () => {
+            port = Math.floor(Math.random() * 10000) + 10000;
+            externalPort = port + 1;
+
+            cwd = await createTestDir();
+            name = runCreateLlama(
+              cwd,
+              templateType,
+              templateFramework,
+              templateEngine,
+              templateUI,
+              appType,
+              externalPort,
+            );
+
+            cps = await runApp(cwd, name, appType, port, externalPort);
+          });
+
+          test("Frontend should have a title", async ({ page }) => {
+            test.skip(appType === "--no-frontend");
             await page.goto(`http://localhost:${port}`);
             await expect(page.getByText("Built by LlamaIndex")).toBeVisible();
-          }
-          // TODO: test backend using curl (would need OpenAI key)
+          });
+
+          test("Frontend should be able to submit a message and receive a response", async ({
+            page,
+          }) => {
+            test.skip(appType === "--no-frontend");
+            await page.goto(`http://localhost:${port}`);
+            await page.fill("form input", "hello");
+            await page.click("form button[type=submit]");
+            const response = await page.waitForResponse(
+              (res) => {
+                return res.url().includes("/api/chat") && res.status() === 200;
+              },
+              {
+                timeout: 1000 * 60,
+              },
+            );
+            const text = await response.text();
+            console.log("AI response when submitting message: ", text);
+            expect(response.ok()).toBeTruthy();
+          });
+
+          test("Backend should response when calling API", async ({
+            request,
+          }) => {
+            test.skip(appType !== "--no-frontend");
+            const response = await request.post(
+              `http://localhost:${port}/api/chat`,
+              {
+                data: {
+                  messages: [
+                    {
+                      role: "user",
+                      content: "Hello",
+                    },
+                  ],
+                },
+              },
+            );
+            const text = await response.text();
+            console.log("AI response when calling API: ", text);
+            expect(response.ok()).toBeTruthy();
+          });
+
           // clean processes
-          cps.forEach((cp) => cp.kill());
+          test.afterAll(async () => {
+            cps.forEach((cp) => cp.kill());
+          });
         });
       }
     }
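
Note on the shared setup above: the three tests reuse app processes
started once in beforeAll, so a mid-suite failure leaves later tests
running against the same instance. If strict ordering is wanted,
Playwright's serial mode is one option (a sketch, not part of this
patch; it would go inside the describe block):

    // Run the shared-process tests in order; a failure skips the rest
    // instead of reusing a possibly broken app instance.
    test.describe.configure({ mode: "serial" });
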
diff --git a/e2e/utils.ts b/e2e/utils.ts
index 2716254a..8bbc1330 100644
--- a/e2e/utils.ts
+++ b/e2e/utils.ts
@@ -12,6 +12,7 @@ export async function runApp(
   name: string,
   appType: AppType,
   port: number,
+  externalPort: number,
 ): Promise<ChildProcess[]> {
   const cps: ChildProcess[] = [];
 
@@ -22,7 +23,7 @@ export async function runApp(
           await createProcess(
             "npm run dev",
             path.join(cwd, name, "backend"),
-            port + 1,
+            externalPort,
           ),
         );
         cps.push(
@@ -71,6 +72,7 @@ export function runCreateLlama(
   templateEngine: string,
   templateUI: string,
   appType: AppType,
+  externalPort: number,
 ) {
   const createLlama = path.join(__dirname, "..", "dist", "index.js");
 
@@ -96,10 +98,12 @@ export function runCreateLlama(
     "--model",
     MODEL,
     "--open-ai-key",
-    "testKey",
+    process.env.OPENAI_API_KEY || "testKey",
     appType,
     "--eslint",
     "--use-npm",
+    "--external-port",
+    externalPort,
   ].join(" ");
   console.log(`running command '${command}' in ${cwd}`);
   execSync(command, {
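
With the key forwarding above, the generated app only receives a real
key when OPENAI_API_KEY is exported; otherwise the literal "testKey" is
passed and the live chat tests will fail. A typical local run (the key
is a placeholder and the exact npm script may differ):

    OPENAI_API_KEY=sk-... npx playwright test e2e/basic.spec.ts
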
diff --git a/index.ts b/index.ts
index 12d06f3d..84396fa5 100644
--- a/index.ts
+++ b/index.ts
@@ -106,10 +106,17 @@ const program = new Commander.Command(packageJson.name)
 `,
   )
   .option(
-    "--model",
+    "--model <model>",
     `
 
   Select OpenAI model to use. E.g. gpt-3.5-turbo.
+`,
+  )
+  .option(
+    "--external-port <external>",
+    `
+
+Select external port.
 `,
   )
   .allowUnknownOption()
@@ -210,6 +217,7 @@ async function run(): Promise<void> {
     model: program.model,
     communityProjectPath: program.communityProjectPath,
     vectorDb: program.vectorDb,
+    externalPort: program.externalPort,
   });
   conf.set("preferences", preferences);
 }
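
One caveat on the option above: Commander hands option values through
as strings, while InstallTemplateArgs (see templates/types.ts below)
types externalPort as a number. A minimal coercion sketch using
Commander's option-processing callback (an assumption, not part of this
patch):

    // Sketch: parse the CLI string into a number before it reaches
    // InstallAppArgs, so externalPort matches its declared type.
    .option(
      "--external-port <port>",
      "Select the external port for the generated backend.",
      (value: string) => parseInt(value, 10),
    )
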
diff --git a/templates/types.ts b/templates/types.ts
index 4b905ef9..6fc7b47c 100644
--- a/templates/types.ts
+++ b/templates/types.ts
@@ -22,4 +22,5 @@ export interface InstallTemplateArgs {
   model: string;
   communityProjectPath?: string;
   vectorDb?: TemplateVectorDB;
+  externalPort?: number;
 }
-- 
GitLab