Commit 8e4873a6 authored by thucpn, committed by Marcus Schiesser

feat: create chat engine folder for python

parent 4c05b262
@@ -189,7 +189,11 @@ export const askQuestions = async (
     }
   }
-  if (program.framework === "express" || program.framework === "nextjs") {
+  if (
+    program.framework === "express" ||
+    program.framework === "nextjs" ||
+    program.framework === "fastapi"
+  ) {
     if (!program.model) {
       if (ciInfo.isCI) {
         program.model = getPrefOrDefault("model");
@@ -218,7 +222,11 @@
     }
   }
-  if (program.framework === "express" || program.framework === "nextjs") {
+  if (
+    program.framework === "express" ||
+    program.framework === "nextjs" ||
+    program.framework === "fastapi"
+  ) {
     if (!program.engine) {
       if (ciInfo.isCI) {
         program.engine = getPrefOrDefault("engine");
@@ -243,7 +251,11 @@
       preferences.engine = engine;
     }
   }
-  if (program.engine !== "simple" && !program.vectorDb) {
+  if (
+    program.engine !== "simple" &&
+    !program.vectorDb &&
+    program.framework !== "fastapi"
+  ) {
     if (ciInfo.isCI) {
       program.vectorDb = getPrefOrDefault("vectorDb");
     } else {
......
import logging
import os
from llama_index import (
    StorageContext,
    load_index_from_storage,
)
from app.engine.constants import STORAGE_DIR
from app.engine.context import create_service_context

def get_chat_engine():
    service_context = create_service_context()
    # check if storage already exists
    if not os.path.exists(STORAGE_DIR):
        raise Exception(
            "StorageContext is empty - call 'npm run generate' to generate the storage first"
        )
    logger = logging.getLogger("uvicorn")
    # load the existing index
    logger.info(f"Loading index from {STORAGE_DIR}...")
    storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
    index = load_index_from_storage(storage_context, service_context=service_context)
    logger.info(f"Finished loading index from {STORAGE_DIR}")
    return index.as_chat_engine()
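The raise above deliberately refuses to build an index at request time; users are pointed at a separate "npm run generate" step instead. That script is not shown in this view, but based on the index-creation code removed further down in this commit, a minimal sketch could look like the following (generate_datasource and DATA_DIR are assumed names, not confirmed by the diff):

import os

from llama_index import SimpleDirectoryReader, VectorStoreIndex

from app.engine.constants import DATA_DIR, STORAGE_DIR  # DATA_DIR assumed
from app.engine.context import create_service_context


def generate_datasource():
    # load the source documents and build a fresh vector index
    documents = SimpleDirectoryReader(DATA_DIR).load_data()
    index = VectorStoreIndex.from_documents(
        documents, service_context=create_service_context()
    )
    # persist the index so get_chat_engine() can load it later
    index.storage_context.persist(STORAGE_DIR)


if __name__ == "__main__":
    generate_datasource()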
@@ -311,7 +311,8 @@ const installPythonTemplate = async ({
   root,
   template,
   framework,
-}: Pick<InstallTemplateArgs, "root" | "framework" | "template">) => {
+  engine,
+}: Pick<InstallTemplateArgs, "root" | "framework" | "template" | "engine">) => {
   console.log("\nInitializing Python project with template:", template, "\n");
   const templatePath = path.join(__dirname, "types", template, framework);
   await copy("**", root, {
@@ -334,6 +335,15 @@ const installPythonTemplate = async ({
     },
   });
 
+  if (engine === "context") {
+    const compPath = path.join(__dirname, "components");
+    const VectorDBPath = path.join(compPath, "vectordbs", "python", "none");
+    await copy("**", path.join(root, "app", "engine"), {
+      parents: true,
+      cwd: VectorDBPath,
+    });
+  }
+
   console.log(
     "\nPython project, dependencies won't be installed automatically.\n",
   );
......
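The copy step above only fires for the "context" engine and pulls the Python "none" vector-db components into the generated project. Pieced together from the import paths elsewhere in this commit, the resulting engine package plausibly looks like this (a sketch; constants.py contents are inferred, not shown in the diff):

# <project root>/
#   app/
#     engine/
#       constants.py   # STORAGE_DIR (and likely DATA_DIR), per the app.engine.constants imports
#       context.py     # create_base_context / create_service_context
#       index.py       # get_chat_engine(), shown above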
@@ -4,7 +4,7 @@ from fastapi import APIRouter, Depends, HTTPException, status
 from llama_index import VectorStoreIndex
 from llama_index.llms.base import MessageRole, ChatMessage
 from pydantic import BaseModel
-from app.context import get_index
+from app.engine.index import get_chat_engine
 
 chat_router = r = APIRouter()
@@ -25,7 +25,7 @@ class _Result(BaseModel):
 @r.post("")
 async def chat(
     data: _ChatData,
-    index: VectorStoreIndex = Depends(get_index),
+    chat_engine: VectorStoreIndex = Depends(get_chat_engine),
 ) -> _Result:
     # check preconditions and get last message
     if len(data.messages) == 0:
@@ -49,7 +49,6 @@ async def chat(
     ]
 
     # query chat engine
-    chat_engine = index.as_chat_engine()
     response = chat_engine.chat(lastMessage.content, messages)
     return _Result(
         result=_Message(role=MessageRole.ASSISTANT, content=response.response)
......
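With the dependency switched from get_index to get_chat_engine, the route handler no longer constructs the engine itself (note that the chat_engine parameter still carries the old VectorStoreIndex annotation, even though get_chat_engine returns a chat engine). A minimal sketch of how the refactored router is mounted and exercised, with the module path and prefix assumed rather than shown in this diff:

from fastapi import FastAPI

from app.api.routers.chat import chat_router  # module path assumed

app = FastAPI()
app.include_router(chat_router, prefix="/api/chat")

# POST /api/chat with {"messages": [{"role": "user", "content": "..."}]}
# resolves chat_engine via Depends(get_chat_engine) on each request.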
@@ -38,3 +38,7 @@ def get_index():
     index = load_index_from_storage(storage_context,service_context=service_context)
     logger.info(f"Finished loading index from {STORAGE_DIR}")
     return index
+
+def get_chat_engine():
+    index = get_index()
+    return index.as_chat_engine()
import os
from llama_index import ServiceContext
from llama_index.llms import OpenAI

def create_base_context():
    model = os.getenv("MODEL", "gpt-3.5-turbo")
    return ServiceContext.from_defaults(
        llm=OpenAI(model=model),
    )
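index.py above imports create_service_context from app.engine.context, but its body is collapsed out of this view. Given the create_base_context helper just shown, a plausible minimal version is a thin wrapper living in the same module (an assumption, not confirmed by the diff; vector-db-specific variants would presumably override it):

def create_service_context():
    # assumption: the "none" vector-db variant has nothing extra to
    # configure, so it simply reuses the base context
    return create_base_context()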
@@ -1,43 +1,23 @@
+import logging
 import os
-import logging
 from llama_index import (
-    SimpleDirectoryReader,
     StorageContext,
-    VectorStoreIndex,
     load_index_from_storage,
-    ServiceContext,
 )
-from llama_index.llms import OpenAI
+from app.engine.constants import STORAGE_DIR
+from app.engine.context import create_service_context
 
-STORAGE_DIR = "./storage"  # directory to cache the generated index
-DATA_DIR = "./data"  # directory containing the documents to index
-
-def create_base_context():
-    model = os.getenv("MODEL", "gpt-3.5-turbo")
-    return ServiceContext.from_defaults(
-        llm=OpenAI(model=model),
-    )
-
-def get_index():
-    service_context = create_base_context()
-    logger = logging.getLogger("uvicorn")
+def get_chat_engine():
+    service_context = create_service_context()
     # check if storage already exists
     if not os.path.exists(STORAGE_DIR):
-        logger.info("Creating new index")
-        # load the documents and create the index
-        documents = SimpleDirectoryReader(DATA_DIR).load_data()
-        index = VectorStoreIndex.from_documents(documents,service_context=service_context)
-        # store it for later
-        index.storage_context.persist(STORAGE_DIR)
-        logger.info(f"Finished creating new index. Stored in {STORAGE_DIR}")
-    else:
-        # load the existing index
-        logger.info(f"Loading index from {STORAGE_DIR}...")
-        storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
-        index = load_index_from_storage(storage_context,service_context=service_context)
-        logger.info(f"Finished loading index from {STORAGE_DIR}")
-    return index
-
-def get_chat_engine():
-    index = get_index()
-    return index.as_chat_engine()
+        raise Exception(
+            "StorageContext is empty - call 'npm run generate' to generate the storage first"
+        )
+    logger = logging.getLogger("uvicorn")
+    # load the existing index
+    logger.info(f"Loading index from {STORAGE_DIR}...")
+    storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
+    index = load_index_from_storage(storage_context, service_context=service_context)
+    logger.info(f"Finished loading index from {STORAGE_DIR}")
+    return index.as_chat_engine()
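After this rewrite, everything the chat route needs sits behind get_chat_engine(). A quick smoke test of the new module, reusing only calls that appear in this diff (the question and history are illustrative):

from llama_index.llms.base import ChatMessage, MessageRole

from app.engine.index import get_chat_engine

chat_engine = get_chat_engine()
history = [ChatMessage(role=MessageRole.USER, content="Hello")]
response = chat_engine.chat("What do the documents cover?", history)
print(response.response)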