From 5a7d8add6faf35bd3a0d03b8fb009632c8c8f763 Mon Sep 17 00:00:00 2001
From: Timothy Carambat <rambat1010@gmail.com>
Date: Thu, 20 Jul 2023 11:14:23 -0700
Subject: [PATCH] [Fork] Additions to franzbischoff's resolution of #122 (#152)

* Related to Issue #122: implemented a custom prompt in workspace settings.

* Run linter

* Remove code duplication for chat prompt injection
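
A rough sketch of what the deduplicated chatPrompt helper does (illustrative calls only; the
real implementation lives in frontend/src/utils/chat/index.js and server/utils/chats/index.js
in the diff below):

    // Returns the workspace's saved custom prompt when one exists,
    // otherwise falls back to the stock system prompt.
    chatPrompt({ openAiPrompt: "Answer like a pirate." }); // -> "Answer like a pirate."
    chatPrompt({ openAiPrompt: null }); // -> "Given the following conversation, relevant context, ..."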

---------

Co-authored-by: Francisco Bischoff <franzbischoff@gmail.com>
---
 frontend/.eslintrc.cjs                        | 29 ++++++++++--------
 .../Modals/MangeWorkspace/Settings/index.jsx  | 30 +++++++++++++++++++
 .../Modals/Settings/ExportImport/index.jsx    |  4 ++-
 frontend/src/components/Sidebar/index.jsx     |  5 ++--
 frontend/src/utils/chat/index.js              |  7 +++++
 server/models/workspace.js                    |  9 +++++-
 server/utils/chats/index.js                   |  9 ++++++
 .../utils/vectorDbProviders/chroma/index.js   |  5 ++--
 server/utils/vectorDbProviders/lance/index.js |  5 ++--
 .../utils/vectorDbProviders/pinecone/index.js | 18 ++++++-----
 10 files changed, 93 insertions(+), 28 deletions(-)

diff --git a/frontend/.eslintrc.cjs b/frontend/.eslintrc.cjs
index ec601b2ce..1bf2b3226 100644
--- a/frontend/.eslintrc.cjs
+++ b/frontend/.eslintrc.cjs
@@ -1,15 +1,20 @@
 module.exports = {
-  env: { browser: true, es2020: true },
-  extends: [
-    'eslint:recommended',
-    'plugin:react/recommended',
-    'plugin:react/jsx-runtime',
-    'plugin:react-hooks/recommended',
+  "env": { "browser": true, "es2020": true },
+  "extends": [
+    "eslint:recommended",
+    "plugin:react/recommended",
+    "plugin:react/jsx-runtime",
+    "plugin:react-hooks/recommended"
   ],
-  parserOptions: { ecmaVersion: 'latest', sourceType: 'module' },
-  settings: { react: { version: '18.2' } },
-  plugins: ['react-refresh'],
-  rules: {
-    'react-refresh/only-export-components': 'warn',
-  },
+  "files": ["**/*.js", "**/*.jsx"],
+  "linterOptions": { "reportUnusedDisableDirectives": true },
+  "parserOptions": { "ecmaVersion": "latest", "sourceType": "module", "ecmaFeatures": { "jsx": true } },
+  "settings": { "react": { "version": '18.2' } },
+  "plugins": [
+    "react-refresh",
+    "react-hooks"
+  ],
+  "rules": {
+    "react-refresh/only-export-components": "warn"
+  }
 }
diff --git a/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx b/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx
index 1ff13e772..7dfc2ccc1 100644
--- a/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx
+++ b/frontend/src/components/Modals/MangeWorkspace/Settings/index.jsx
@@ -1,6 +1,7 @@
 import React, { useState, useRef, useEffect } from "react";
 import Workspace from "../../../../models/workspace";
 import paths from "../../../../utils/paths";
+import { chatPrompt } from "../../../../utils/chat";
 
 export default function WorkspaceSettings({ workspace }) {
   const formEl = useRef(null);
@@ -141,6 +142,35 @@ export default function WorkspaceSettings({ workspace }) {
               />
             </div>
 
+            <div>
+              <div className="flex flex-col gap-y-1 mb-4">
+                <label
+                  htmlFor="name"
+                  className="block text-sm font-medium text-gray-900 dark:text-white"
+                >
+                  Prompt
+                </label>
+                <p className="text-xs text-gray-600 dark:text-stone-400">
+                  The prompt that will be used on this workspace. Define the
+                  context and instructions for the AI to generate a response.
+                  You should provide a carefully crafted prompt so the AI can
+                  generate a relevant and accurate response.
+                </p>
+              </div>
+              <textarea
+                name="openAiPrompt"
+                maxLength={500}
+                rows={5}
+                defaultValue={chatPrompt(workspace)}
+                className="bg-gray-50 border border-gray-300 text-gray-900 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5 dark:bg-stone-600 dark:border-stone-600 dark:placeholder-gray-400 dark:text-white dark:focus:ring-blue-500 dark:focus:border-blue-500"
+                placeholder="Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed."
+                required={true}
+                wrap="soft"
+                autoComplete="off"
+                onChange={() => setHasChanges(true)}
+              />
+            </div>
+
             <div>
               <div className="flex flex-col gap-y-1 mb-4">
                 <label
diff --git a/frontend/src/components/Modals/Settings/ExportImport/index.jsx b/frontend/src/components/Modals/Settings/ExportImport/index.jsx
index 169feb89c..4099e8c06 100644
--- a/frontend/src/components/Modals/Settings/ExportImport/index.jsx
+++ b/frontend/src/components/Modals/Settings/ExportImport/index.jsx
@@ -181,7 +181,9 @@ function ImportData() {
             Import was completed successfully
           </p>
         </div>
-        <p className="text-green-800 text-xs italic">please reload the page to see the results of the import.</p>
+        <p className="text-green-800 text-xs italic">
+          please reload the page to see the results of the import.
+        </p>
       </div>
     );
   }
diff --git a/frontend/src/components/Sidebar/index.jsx b/frontend/src/components/Sidebar/index.jsx
index d7cdabdbb..880e54fde 100644
--- a/frontend/src/components/Sidebar/index.jsx
+++ b/frontend/src/components/Sidebar/index.jsx
@@ -185,10 +185,11 @@ export function SidebarMobileHeader() {
         className={`z-99 fixed top-0 left-0 transition-all duration-500 w-[100vw] h-[100vh]`}
       >
         <div
-          className={`${showBgOverlay
+          className={`${
+            showBgOverlay
               ? "transition-all opacity-1"
               : "transition-none opacity-0"
-            }  duration-500 fixed top-0 left-0 bg-black-900 bg-opacity-75 w-screen h-screen`}
+          }  duration-500 fixed top-0 left-0 bg-black-900 bg-opacity-75 w-screen h-screen`}
           onClick={() => setShowSidebar(false)}
         />
         <div
diff --git a/frontend/src/utils/chat/index.js b/frontend/src/utils/chat/index.js
index 539e22c7e..35a911d0d 100644
--- a/frontend/src/utils/chat/index.js
+++ b/frontend/src/utils/chat/index.js
@@ -56,3 +56,10 @@ export default function handleChat(
     });
   }
 }
+
+export function chatPrompt(workspace) {
+  return (
+    workspace?.openAiPrompt ??
+    "Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed."
+  );
+}
diff --git a/server/models/workspace.js b/server/models/workspace.js
index 1f4ca84af..fed92434b 100644
--- a/server/models/workspace.js
+++ b/server/models/workspace.js
@@ -12,6 +12,7 @@ const Workspace = {
     "openAiTemp",
     "openAiHistory",
     "lastUpdatedAt",
+    "openAiPrompt",
   ],
   colsInit: `
   id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -21,7 +22,8 @@ const Workspace = {
   createdAt TEXT DEFAULT CURRENT_TIMESTAMP,
   openAiTemp REAL DEFAULT NULL,
   openAiHistory INTEGER DEFAULT 20,
-  lastUpdatedAt TEXT DEFAULT CURRENT_TIMESTAMP
+  lastUpdatedAt TEXT DEFAULT CURRENT_TIMESTAMP,
+  openAiPrompt TEXT DEFAULT NULL
   `,
   migrateTable: async function () {
     console.log(`\x1b[34m[MIGRATING]\x1b[0m Checking for Workspace migrations`);
@@ -35,6 +37,11 @@ const Workspace = {
         execCmd: `ALTER TABLE ${this.tablename} ADD COLUMN openAiTemp REAL DEFAULT NULL`,
         doif: false,
       },
+      {
+        colName: "openAiPrompt",
+        execCmd: `ALTER TABLE ${this.tablename} ADD COLUMN openAiPrompt TEXT DEFAULT NULL`,
+        doif: false,
+      },
       {
         colName: "id",
         execCmd: `CREATE TRIGGER IF NOT EXISTS Trg_LastUpdated AFTER UPDATE ON ${this.tablename}
diff --git a/server/utils/chats/index.js b/server/utils/chats/index.js
index cd4c2942e..800003e21 100644
--- a/server/utils/chats/index.js
+++ b/server/utils/chats/index.js
@@ -148,7 +148,16 @@ async function chatWithWorkspace(workspace, message, chatMode = "chat") {
     };
   }
 }
+
+function chatPrompt(workspace) {
+  return (
+    workspace?.openAiPrompt ??
+    "Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed."
+  );
+}
+
 module.exports = {
   convertToChatHistory,
   chatWithWorkspace,
+  chatPrompt,
 };
diff --git a/server/utils/vectorDbProviders/chroma/index.js b/server/utils/vectorDbProviders/chroma/index.js
index 1fb324aa6..ddeb3afe7 100644
--- a/server/utils/vectorDbProviders/chroma/index.js
+++ b/server/utils/vectorDbProviders/chroma/index.js
@@ -8,6 +8,7 @@ const { storeVectorResult, cachedVectorInformation } = require("../../files");
 const { Configuration, OpenAIApi } = require("openai");
 const { v4: uuidv4 } = require("uuid");
 const { toChunks, curateSources } = require("../../helpers");
+const { chatPrompt } = require("../../chats");
 
 const Chroma = {
   name: "Chroma",
@@ -303,7 +304,7 @@ const Chroma = {
       { collectionName: namespace, url: process.env.CHROMA_ENDPOINT }
     );
     const model = this.llm({
-      temperature: workspace?.openAiTemp,
+      temperature: workspace?.openAiTemp ?? 0.7,
     });
 
     const chain = VectorDBQAChain.fromLLM(model, vectorStore, {
@@ -347,7 +348,7 @@ const Chroma = {
     );
     const prompt = {
       role: "system",
-      content: `Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.
+      content: `${chatPrompt(workspace)}
     Context:
     ${contextTexts
       .map((text, i) => {
diff --git a/server/utils/vectorDbProviders/lance/index.js b/server/utils/vectorDbProviders/lance/index.js
index 293e835a1..21d962ffa 100644
--- a/server/utils/vectorDbProviders/lance/index.js
+++ b/server/utils/vectorDbProviders/lance/index.js
@@ -5,6 +5,7 @@ const { RecursiveCharacterTextSplitter } = require("langchain/text_splitter");
 const { storeVectorResult, cachedVectorInformation } = require("../../files");
 const { Configuration, OpenAIApi } = require("openai");
 const { v4: uuidv4 } = require("uuid");
+const { chatPrompt } = require("../../chats");
 
 // Since we roll our own results for prompting we
 // have to manually curate sources as well.
@@ -260,7 +261,7 @@ const LanceDb = {
     );
     const prompt = {
       role: "system",
-      content: `Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.
+      content: `${chatPrompt(workspace)}
     Context:
     ${contextTexts
       .map((text, i) => {
@@ -309,7 +310,7 @@ const LanceDb = {
     );
     const prompt = {
       role: "system",
-      content: `Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.
+      content: `${chatPrompt(workspace)}
     Context:
     ${contextTexts
       .map((text, i) => {
diff --git a/server/utils/vectorDbProviders/pinecone/index.js b/server/utils/vectorDbProviders/pinecone/index.js
index dc984f7fa..67e4d1efb 100644
--- a/server/utils/vectorDbProviders/pinecone/index.js
+++ b/server/utils/vectorDbProviders/pinecone/index.js
@@ -10,6 +10,7 @@ const { storeVectorResult, cachedVectorInformation } = require("../../files");
 const { Configuration, OpenAIApi } = require("openai");
 const { v4: uuidv4 } = require("uuid");
 const { toChunks, curateSources } = require("../../helpers");
+const { chatPrompt } = require("../../chats");
 
 const Pinecone = {
   name: "Pinecone",
@@ -278,7 +279,7 @@ const Pinecone = {
     });
 
     const model = this.llm({
-      temperature: workspace?.openAiTemp,
+      temperature: workspace?.openAiTemp ?? 0.7,
     });
     const chain = VectorDBQAChain.fromLLM(model, vectorStore, {
       k: 5,
@@ -318,14 +319,15 @@ const Pinecone = {
     );
     const prompt = {
       role: "system",
-      content: `Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.
-Context:
-${contextTexts
-  .map((text, i) => {
-    return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
-  })
-  .join("")}`,
+      content: `${chatPrompt(workspace)}
+    Context:
+    ${contextTexts
+      .map((text, i) => {
+        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+      })
+      .join("")}`,
     };
+
     const memory = [prompt, ...chatHistory, { role: "user", content: input }];
 
     const responseText = await this.getChatCompletion(this.openai(), memory, {
-- 
GitLab