Commit 09d4e362 authored by thucpn, committed by Marcus Schiesser

feat: create chat engine folder for python

parent 8fb523bc
Showing changes with 90 additions and 32 deletions
@@ -189,7 +189,11 @@ export const askQuestions = async (
       }
     }
   }
-  if (program.framework === "express" || program.framework === "nextjs") {
+  if (
+    program.framework === "express" ||
+    program.framework === "nextjs" ||
+    program.framework === "fastapi"
+  ) {
     if (!program.model) {
       if (ciInfo.isCI) {
         program.model = getPrefOrDefault("model");
@@ -218,7 +222,11 @@ export const askQuestions = async (
       }
     }
   }
-  if (program.framework === "express" || program.framework === "nextjs") {
+  if (
+    program.framework === "express" ||
+    program.framework === "nextjs" ||
+    program.framework === "fastapi"
+  ) {
     if (!program.engine) {
       if (ciInfo.isCI) {
         program.engine = getPrefOrDefault("engine");
@@ -243,7 +251,11 @@ export const askQuestions = async (
       preferences.engine = engine;
     }
   }
-  if (program.engine !== "simple" && !program.vectorDb) {
+  if (
+    program.engine !== "simple" &&
+    !program.vectorDb &&
+    program.framework !== "fastapi"
+  ) {
     if (ciInfo.isCI) {
       program.vectorDb = getPrefOrDefault("vectorDb");
     } else {
...
import logging
import os

from llama_index import (
    StorageContext,
    load_index_from_storage,
)

from app.engine.constants import STORAGE_DIR
from app.engine.context import create_service_context


def get_chat_engine():
    service_context = create_service_context()
    # check if storage already exists
    if not os.path.exists(STORAGE_DIR):
        raise Exception(
            "StorageContext is empty - call 'npm run generate' to generate the storage first"
        )
    logger = logging.getLogger("uvicorn")
    # load the existing index
    logger.info(f"Loading index from {STORAGE_DIR}...")
    storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
    index = load_index_from_storage(storage_context, service_context=service_context)
    logger.info(f"Finished loading index from {STORAGE_DIR}")
    return index.as_chat_engine()
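
The chat engine above only loads an index that already exists on disk; building it is left to a separate step (the error message points at npm run generate). As a rough sketch of what that generation step could look like, reusing the index-building code that appears in the old get_index() later in this commit (the generate.py file name and a DATA_DIR entry in app/engine/constants.py are assumptions, not part of this diff):

# generate.py -- hypothetical helper, not part of this commit; it mirrors the
# index-building branch of the old get_index() so that STORAGE_DIR is populated
# before get_chat_engine() is called.
import logging

from llama_index import SimpleDirectoryReader, VectorStoreIndex

from app.engine.constants import DATA_DIR, STORAGE_DIR  # DATA_DIR is assumed
from app.engine.context import create_service_context

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()


def generate_datasource():
    logger.info("Creating new index")
    # load the documents and create the index
    documents = SimpleDirectoryReader(DATA_DIR).load_data()
    index = VectorStoreIndex.from_documents(
        documents, service_context=create_service_context()
    )
    # store it for later
    index.storage_context.persist(STORAGE_DIR)
    logger.info(f"Finished creating new index. Stored in {STORAGE_DIR}")


if __name__ == "__main__":
    generate_datasource()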
@@ -311,7 +311,8 @@ const installPythonTemplate = async ({
   root,
   template,
   framework,
-}: Pick<InstallTemplateArgs, "root" | "framework" | "template">) => {
+  engine,
+}: Pick<InstallTemplateArgs, "root" | "framework" | "template" | "engine">) => {
   console.log("\nInitializing Python project with template:", template, "\n");
   const templatePath = path.join(__dirname, "types", template, framework);
   await copy("**", root, {
@@ -334,6 +335,15 @@ const installPythonTemplate = async ({
     },
   });
 
+  if (engine === "context") {
+    const compPath = path.join(__dirname, "components");
+    const VectorDBPath = path.join(compPath, "vectordbs", "python", "none");
+    await copy("**", path.join(root, "app", "engine"), {
+      parents: true,
+      cwd: VectorDBPath,
+    });
+  }
+
   console.log(
     "\nPython project, dependencies won't be installed automatically.\n",
   );
...
@@ -4,7 +4,7 @@ from fastapi import APIRouter, Depends, HTTPException, status
 from llama_index import VectorStoreIndex
 from llama_index.llms.base import MessageRole, ChatMessage
 from pydantic import BaseModel
-from app.context import get_index
+from app.engine.index import get_chat_engine
 
 chat_router = r = APIRouter()
@@ -25,7 +25,7 @@ class _Result(BaseModel):
 @r.post("")
 async def chat(
     data: _ChatData,
-    index: VectorStoreIndex = Depends(get_index),
+    chat_engine: VectorStoreIndex = Depends(get_chat_engine),
 ) -> _Result:
     # check preconditions and get last message
     if len(data.messages) == 0:
@@ -49,7 +49,6 @@ async def chat(
     ]
 
     # query chat engine
-    chat_engine = index.as_chat_engine()
     response = chat_engine.chat(lastMessage.content, messages)
     return _Result(
         result=_Message(role=MessageRole.ASSISTANT, content=response.response)
...
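
For reference, a client would POST the chat history as a list of role/content messages and read the assistant reply from the result field of the response. A minimal sketch, assuming the router is mounted at /api/chat on a local dev server (the URL is not part of this diff; only the payload and response shapes are taken from the _ChatData and _Result models above):

import requests

# Hypothetical local endpoint; adjust host/port and prefix to your app setup.
url = "http://localhost:8000/api/chat"
payload = {
    "messages": [
        {"role": "user", "content": "What is in the indexed documents?"},
    ]
}

response = requests.post(url, json=payload)
response.raise_for_status()
# _Result serializes as {"result": {"role": "assistant", "content": "..."}}
print(response.json()["result"]["content"])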
@@ -38,3 +38,7 @@ def get_index():
     index = load_index_from_storage(storage_context, service_context=service_context)
     logger.info(f"Finished loading index from {STORAGE_DIR}")
     return index
+
+def get_chat_engine():
+    index = get_index()
+    return index.as_chat_engine()
import os

from llama_index import ServiceContext
from llama_index.llms import OpenAI


def create_base_context():
    model = os.getenv("MODEL", "gpt-3.5-turbo")
    return ServiceContext.from_defaults(
        llm=OpenAI(model=model),
    )
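
Note that index.py above imports create_service_context from app.engine.context, while this file only defines create_base_context. The wrapper itself is not shown in this commit; a plausible sketch of it, assuming the "none" vector DB component needs nothing beyond the base context:

# Hypothetical continuation of app/engine/context.py; create_service_context
# is imported by index.py but does not appear in this diff.
def create_service_context():
    # no vector-store-specific configuration for the "none" component,
    # so the service context is just the base context
    return create_base_context()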
+import logging
 import os
-import logging
 from llama_index import (
-    SimpleDirectoryReader,
     StorageContext,
-    VectorStoreIndex,
     load_index_from_storage,
-    ServiceContext,
 )
-from llama_index.llms import OpenAI
+from app.engine.constants import STORAGE_DIR
+from app.engine.context import create_service_context
 
-STORAGE_DIR = "./storage"  # directory to cache the generated index
-DATA_DIR = "./data"  # directory containing the documents to index
-
-def create_base_context():
-    model = os.getenv("MODEL", "gpt-3.5-turbo")
-    return ServiceContext.from_defaults(
-        llm=OpenAI(model=model),
-    )
 
-def get_index():
-    service_context = create_base_context()
-    logger = logging.getLogger("uvicorn")
+def get_chat_engine():
+    service_context = create_service_context()
     # check if storage already exists
     if not os.path.exists(STORAGE_DIR):
-        logger.info("Creating new index")
-        # load the documents and create the index
-        documents = SimpleDirectoryReader(DATA_DIR).load_data()
-        index = VectorStoreIndex.from_documents(documents, service_context=service_context)
-        # store it for later
-        index.storage_context.persist(STORAGE_DIR)
-        logger.info(f"Finished creating new index. Stored in {STORAGE_DIR}")
-    else:
-        # load the existing index
-        logger.info(f"Loading index from {STORAGE_DIR}...")
-        storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
-        index = load_index_from_storage(storage_context, service_context=service_context)
-        logger.info(f"Finished loading index from {STORAGE_DIR}")
-    return index
-
-def get_chat_engine():
-    index = get_index()
-    return index.as_chat_engine()
+        raise Exception(
+            "StorageContext is empty - call 'npm run generate' to generate the storage first"
+        )
+    logger = logging.getLogger("uvicorn")
+    # load the existing index
+    logger.info(f"Loading index from {STORAGE_DIR}...")
+    storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
+    index = load_index_from_storage(storage_context, service_context=service_context)
+    logger.info(f"Finished loading index from {STORAGE_DIR}")
+    return index.as_chat_engine()