Commit cbebd031 authored by leehuwuj

stg

parent bc2d503f
Showing 26 additions and 97 deletions
@@ -470,12 +470,6 @@ export const installPythonTemplate = async ({
     }
   }
-  // Copy engine code
-  await copy("**", enginePath, {
-    parents: true,
-    cwd: path.join(compPath, "engines", "python", engine),
-  });
   // Copy router code
   await copyRouterCode(root, tools ?? []);
 }
@@ -19,10 +19,12 @@ export const getDataSourceChoices = (
     });
   }
   if (selectedDataSource === undefined || selectedDataSource.length === 0) {
-    choices.push({
-      title: "No datasource",
-      value: "none",
-    });
+    if (framework !== "fastapi") {
+      choices.push({
+        title: "No datasource",
+        value: "none",
+      });
+    }
     choices.push({
       title:
         process.platform !== "linux"
import os

from app.engine.index import IndexConfig, get_index
from app.engine.node_postprocessors import NodeCitationProcessor
from fastapi import HTTPException
from llama_index.core.callbacks import CallbackManager
from llama_index.core.chat_engine import CondensePlusContextChatEngine
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.settings import Settings


def get_chat_engine(params=None, event_handlers=None, **kwargs):
    system_prompt = os.getenv("SYSTEM_PROMPT")
    citation_prompt = os.getenv("SYSTEM_CITATION_PROMPT")
    top_k = int(os.getenv("TOP_K", 0))
    llm = Settings.llm
    # Leave headroom in the memory buffer for the model's response tokens.
    memory = ChatMemoryBuffer.from_defaults(
        token_limit=llm.metadata.context_window - 256
    )
    callback_manager = CallbackManager(handlers=event_handlers or [])

    # When a citation prompt is configured, tag retrieved nodes with their
    # ids and extend the system prompt with the citation instructions.
    node_postprocessors = []
    if citation_prompt:
        node_postprocessors = [NodeCitationProcessor()]
        system_prompt = f"{system_prompt}\n{citation_prompt}"

    index_config = IndexConfig(callback_manager=callback_manager, **(params or {}))
    index = get_index(index_config)
    if index is None:
        raise HTTPException(
            status_code=500,
            detail="StorageContext is empty - call 'poetry run generate' to generate the storage first",
        )

    # Forward TOP_K to the retriever unless the caller already set it.
    if top_k != 0 and kwargs.get("similarity_top_k") is None:
        kwargs["similarity_top_k"] = top_k
    retriever = index.as_retriever(**kwargs)

    return CondensePlusContextChatEngine(
        llm=llm,
        memory=memory,
        system_prompt=system_prompt,
        retriever=retriever,
        node_postprocessors=node_postprocessors,  # type: ignore
        callback_manager=callback_manager,
    )
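
For orientation, here is a minimal sketch of how get_chat_engine might be exercised outside the route handlers, assuming Settings.llm is configured, the storage has already been generated, and the module above is importable as app.engine.engine (the file path is not shown in this commit); the prompts and history below are made up:

import os

from app.engine.engine import get_chat_engine  # assumed module path
from llama_index.core.llms import ChatMessage, MessageRole

# These variables are read inside get_chat_engine; the values are examples.
os.environ["SYSTEM_PROMPT"] = "You are a helpful documentation assistant."
os.environ["SYSTEM_CITATION_PROMPT"] = (
    "Cite the node_id from the metadata of every source node you use."
)
os.environ["TOP_K"] = "3"

chat_engine = get_chat_engine()
history = [ChatMessage(role=MessageRole.USER, content="Hello")]
response = chat_engine.chat("What does the document say about pricing?", history)
print(response.response)
for source in response.source_nodes:
    # With the citation prompt set, each node carries its id in metadata.
    print(source.node.metadata.get("node_id"), source.score)

Note that any extra keyword arguments, e.g. get_chat_engine(similarity_top_k=5), are forwarded straight to index.as_retriever().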
from typing import List, Optional

from llama_index.core import QueryBundle
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import NodeWithScore


class NodeCitationProcessor(BaseNodePostprocessor):
    """
    Append each node's node_id to its metadata so the LLM can cite sources.
    Set the SYSTEM_CITATION_PROMPT environment variable to enable this feature.
    """

    def _postprocess_nodes(
        self,
        nodes: List[NodeWithScore],
        query_bundle: Optional[QueryBundle] = None,
    ) -> List[NodeWithScore]:
        for node_score in nodes:
            node_score.node.metadata["node_id"] = node_score.node.node_id
        return nodes
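
As a quick illustration (not part of the commit), the postprocessor can also be run standalone; the nodes below are fabricated for the example:

from app.engine.node_postprocessors import NodeCitationProcessor
from llama_index.core.schema import NodeWithScore, TextNode

nodes = [
    NodeWithScore(node=TextNode(text="Paris is the capital of France.", id_="node-1"), score=0.9),
    NodeWithScore(node=TextNode(text="Berlin is the capital of Germany.", id_="node-2"), score=0.7),
]

# postprocess_nodes is the public entry point that delegates to _postprocess_nodes.
for n in NodeCitationProcessor().postprocess_nodes(nodes):
    print(n.node.metadata["node_id"], "->", n.node.text)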
@@ -59,25 +59,26 @@ async def chat(
 # TODO: Update non-streaming endpoint
-# non-streaming endpoint - delete if not needed
-@r.post("/request")
-async def chat_request(
-    data: ChatData,
-) -> Result:
-    last_message_content = data.get_last_message_content()
-    messages = data.get_history_messages()
+# Would be better if we use same chat.py endpoint for both agent and multiagent templates
+# # non-streaming endpoint - delete if not needed
+# @r.post("/request")
+# async def chat_request(
+#     data: ChatData,
+# ) -> Result:
+#     last_message_content = data.get_last_message_content()
+#     messages = data.get_history_messages()
-    doc_ids = data.get_chat_document_ids()
-    filters = generate_filters(doc_ids)
-    params = data.data or {}
-    logger.info(
-        f"Creating chat engine with filters: {str(filters)}",
-    )
+#     doc_ids = data.get_chat_document_ids()
+#     filters = generate_filters(doc_ids)
+#     params = data.data or {}
+#     logger.info(
+#         f"Creating chat engine with filters: {str(filters)}",
+#     )
-    chat_engine = get_chat_engine(filters=filters, params=params)
+#     chat_engine = get_chat_engine(filters=filters, params=params)
-    response = await chat_engine.achat(last_message_content, messages)
-    return Result(
-        result=Message(role=MessageRole.ASSISTANT, content=response.response),
-        nodes=SourceNodes.from_source_nodes(response.source_nodes),
-    )
+#     response = await chat_engine.achat(last_message_content, messages)
+#     return Result(
+#         result=Message(role=MessageRole.ASSISTANT, content=response.response),
+#         nodes=SourceNodes.from_source_nodes(response.source_nodes),
+#     )
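
With the non-streaming /request handler commented out, clients would rely on the remaining streaming chat route. A sketch of such a call, assuming the usual create-llama payload shape and an /api/chat mount point, neither of which is shown in this hunk:

import requests

# Assumed URL and request body; adjust to the app's actual route and schema.
resp = requests.post(
    "http://localhost:8000/api/chat",
    json={"messages": [{"role": "user", "content": "Summarize the document."}]},
    stream=True,
)
resp.raise_for_status()
for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
    print(chunk, end="", flush=True)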