From 5263bde8e78f9eefaad5f0ffbdbd199c27822f5e Mon Sep 17 00:00:00 2001
From: Marcus Schiesser <mail@marcusschiesser.de>
Date: Tue, 8 Oct 2024 16:54:14 +0700
Subject: [PATCH] feat: Use selected LlamaCloud index in multi-agent template
 (#350)

---
 .changeset/witty-hotels-do.md                 |  5 +++++
 .../multiagent/python/app/api/routers/chat.py |  4 ++--
 .../multiagent/python/app/engine/engine.py    |  6 ++---
 .../python/app/examples/choreography.py       | 10 +++++----
 .../python/app/examples/orchestrator.py       | 16 +++++++++-----
 .../python/app/examples/researcher.py         | 22 +++++++++++--------
 .../python/app/examples/workflow.py           |  9 +++++---
 .../fastapi/app/api/routers/chat_config.py    |  6 ++---
 8 files changed, 47 insertions(+), 31 deletions(-)
 create mode 100644 .changeset/witty-hotels-do.md

diff --git a/.changeset/witty-hotels-do.md b/.changeset/witty-hotels-do.md
new file mode 100644
index 00000000..ceb7bb9e
--- /dev/null
+++ b/.changeset/witty-hotels-do.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Use selected LlamaCloud index in multi-agent template
diff --git a/templates/components/multiagent/python/app/api/routers/chat.py b/templates/components/multiagent/python/app/api/routers/chat.py
index e2c80828..314f4761 100644
--- a/templates/components/multiagent/python/app/api/routers/chat.py
+++ b/templates/components/multiagent/python/app/api/routers/chat.py
@@ -28,8 +28,8 @@ async def chat(
         # but agent workflow does not support them yet
         # ignore chat params and use all documents for now
         # TODO: generate filters based on doc_ids
-        # TODO: use chat params
-        engine = get_chat_engine(chat_history=messages)
+        params = data.data or {}
+        engine = get_chat_engine(chat_history=messages, params=params)
 
         event_handler = engine.run(input=last_message_content, streaming=True)
         return VercelStreamResponse(
diff --git a/templates/components/multiagent/python/app/engine/engine.py b/templates/components/multiagent/python/app/engine/engine.py
index e9563975..506179a0 100644
--- a/templates/components/multiagent/python/app/engine/engine.py
+++ b/templates/components/multiagent/python/app/engine/engine.py
@@ -18,11 +18,11 @@ def get_chat_engine(
     agent_type = os.getenv("EXAMPLE_TYPE", "").lower()
     match agent_type:
         case "choreography":
-            agent = create_choreography(chat_history)
+            agent = create_choreography(chat_history, **kwargs)
         case "orchestrator":
-            agent = create_orchestrator(chat_history)
+            agent = create_orchestrator(chat_history, **kwargs)
         case _:
-            agent = create_workflow(chat_history)
+            agent = create_workflow(chat_history, **kwargs)
 
     logger.info(f"Using agent pattern: {agent_type}")
 
diff --git a/templates/components/multiagent/python/app/examples/choreography.py b/templates/components/multiagent/python/app/examples/choreography.py
index 13da60e5..b194c20b 100644
--- a/templates/components/multiagent/python/app/examples/choreography.py
+++ b/templates/components/multiagent/python/app/examples/choreography.py
@@ -8,8 +8,8 @@ from app.examples.researcher import create_researcher
 from llama_index.core.chat_engine.types import ChatMessage
 
 
-def create_choreography(chat_history: Optional[List[ChatMessage]] = None):
-    researcher = create_researcher(chat_history)
+def create_choreography(chat_history: Optional[List[ChatMessage]] = None, **kwargs):
+    researcher = create_researcher(chat_history, **kwargs)
     publisher = create_publisher(chat_history)
     reviewer = FunctionCallingAgent(
         name="reviewer",
@@ -21,12 +21,14 @@ def create_choreography(chat_history: Optional[List[ChatMessage]] = None):
         name="writer",
         agents=[researcher, reviewer, publisher],
         description="expert in writing blog posts, needs researched information and images to write a blog post",
-        system_prompt=dedent("""
+        system_prompt=dedent(
+            """
             You are an expert in writing blog posts. You are given a task to write a blog post. Before starting to write the post, consult the researcher agent to get the information you need. Don't make up any information yourself.
             After creating a draft for the post, send it to the reviewer agent to receive feedback and make sure to incorporate the feedback from the reviewer.
             You can consult the reviewer and researcher a maximum of two times. Your output should contain only the blog post.
             Finally, always request the publisher to create a document (PDF, HTML) and publish the blog post.
-        """),
+        """
+        ),
         # TODO: add chat_history support to AgentCallingAgent
         # chat_history=chat_history,
     )
diff --git a/templates/components/multiagent/python/app/examples/orchestrator.py b/templates/components/multiagent/python/app/examples/orchestrator.py
index 8786dcd3..7cf74c0c 100644
--- a/templates/components/multiagent/python/app/examples/orchestrator.py
+++ b/templates/components/multiagent/python/app/examples/orchestrator.py
@@ -8,28 +8,32 @@ from app.examples.researcher import create_researcher
 from llama_index.core.chat_engine.types import ChatMessage
 
 
-def create_orchestrator(chat_history: Optional[List[ChatMessage]] = None):
-    researcher = create_researcher(chat_history)
+def create_orchestrator(chat_history: Optional[List[ChatMessage]] = None, **kwargs):
+    researcher = create_researcher(chat_history, **kwargs)
     writer = FunctionCallingAgent(
         name="writer",
         description="expert in writing blog posts, need information and images to write a post",
-        system_prompt=dedent("""
+        system_prompt=dedent(
+            """
             You are an expert in writing blog posts.
             You are given a task to write a blog post. Do not make up any information yourself.
             If you don't have the necessary information to write a blog post, reply "I need information about the topic to write the blog post".
             If you need to use images, reply "I need images about the topic to write the blog post". Do not use any dummy images made up by you.
             If you have all the information needed, write the blog post.
-        """),
+        """
+        ),
         chat_history=chat_history,
     )
     reviewer = FunctionCallingAgent(
         name="reviewer",
         description="expert in reviewing blog posts, needs a written blog post to review",
-        system_prompt=dedent("""
+        system_prompt=dedent(
+            """
             You are an expert in reviewing blog posts. You are given a task to review a blog post. Review the post and fix any issues found yourself. You must output a final blog post.
             A post must include at least one valid image. If not, reply "I need images about the topic to write the blog post". An image URL starting with "example" or "your website" is not valid.
             Especially check for logical inconsistencies and proofread the post for grammar and spelling errors.
-        """),
+        """
+        ),
         chat_history=chat_history,
     )
     publisher = create_publisher(chat_history)
diff --git a/templates/components/multiagent/python/app/examples/researcher.py b/templates/components/multiagent/python/app/examples/researcher.py
index 6efa70e9..abcc2da3 100644
--- a/templates/components/multiagent/python/app/examples/researcher.py
+++ b/templates/components/multiagent/python/app/examples/researcher.py
@@ -3,17 +3,19 @@ from textwrap import dedent
 from typing import List
 
 from app.agents.single import FunctionCallingAgent
-from app.engine.index import get_index
+from app.engine.index import IndexConfig, get_index
 from app.engine.tools import ToolFactory
 from llama_index.core.chat_engine.types import ChatMessage
 from llama_index.core.tools import QueryEngineTool, ToolMetadata
 
 
-def _create_query_engine_tool() -> QueryEngineTool:
+def _create_query_engine_tool(params=None) -> QueryEngineTool:
     """
     Provide an agent worker that can be used to query the index.
     """
-    index = get_index()
+    # Build the index from the request params; the tool is only created if an index exists
+    index_config = IndexConfig(**(params or {}))
+    index = get_index(index_config)
     if index is None:
         return None
     top_k = int(os.getenv("TOP_K", 0))
@@ -31,13 +33,13 @@ def _create_query_engine_tool() -> QueryEngineTool:
     )
 
 
-def _get_research_tools() -> QueryEngineTool:
+def _get_research_tools(**kwargs) -> QueryEngineTool:
     """
     Researcher take responsibility for retrieving information.
     Try init wikipedia or duckduckgo tool if available.
     """
     tools = []
-    query_engine_tool = _create_query_engine_tool()
+    query_engine_tool = _create_query_engine_tool(**kwargs)
     if query_engine_tool is not None:
         tools.append(query_engine_tool)
     researcher_tool_names = ["duckduckgo", "wikipedia.WikipediaToolSpec"]
@@ -48,16 +50,17 @@ def _get_research_tools() -> QueryEngineTool:
     return tools
 
 
-def create_researcher(chat_history: List[ChatMessage]):
+def create_researcher(chat_history: List[ChatMessage], **kwargs):
     """
     Researcher is an agent that take responsibility for using tools to complete a given task.
     """
-    tools = _get_research_tools()
+    tools = _get_research_tools(**kwargs)
     return FunctionCallingAgent(
         name="researcher",
         tools=tools,
         description="expert in retrieving any unknown content or searching for images from the internet",
-        system_prompt=dedent("""
+        system_prompt=dedent(
+            """
             You are a researcher agent. You are given a research task.
             
             If the conversation already includes the information and there is no new request for additional information from the user, you should return the appropriate content to the writer.
@@ -77,6 +80,7 @@ def create_researcher(chat_history: List[ChatMessage]):
 
             If you use the tools but don't find any related information, please return "I didn't find any new information for {the topic}." along with the content you found. Don't try to make up information yourself.
             If the request doesn't need any new information because it was in the conversation history, please return "The task doesn't need any new information. Please reuse the existing content in the conversation history."
-        """),
+        """
+        ),
         chat_history=chat_history,
     )
diff --git a/templates/components/multiagent/python/app/examples/workflow.py b/templates/components/multiagent/python/app/examples/workflow.py
index 8a3432d9..89bd6dba 100644
--- a/templates/components/multiagent/python/app/examples/workflow.py
+++ b/templates/components/multiagent/python/app/examples/workflow.py
@@ -17,9 +17,10 @@ from llama_index.core.workflow import (
 )
 
 
-def create_workflow(chat_history: Optional[List[ChatMessage]] = None):
+def create_workflow(chat_history: Optional[List[ChatMessage]] = None, **kwargs):
     researcher = create_researcher(
         chat_history=chat_history,
+        **kwargs,
     )
     publisher = create_publisher(
         chat_history=chat_history,
@@ -127,7 +128,8 @@ class BlogPostWorkflow(Workflow):
         self, input: str, chat_history: List[ChatMessage]
     ) -> str:
         prompt_template = PromptTemplate(
-            dedent("""
+            dedent(
+                """
                 You are an expert in decision-making, helping people write and publish blog posts.
                 If the user is asking for a file or to publish content, respond with 'publish'.
                 If the user requests to write or update a blog post, respond with 'not_publish'.
@@ -140,7 +142,8 @@ class BlogPostWorkflow(Workflow):
 
                 Given the chat history and the new user request, decide whether to publish based on existing information.
                 Decision (respond with either 'not_publish' or 'publish'):
-            """)
+            """
+            )
         )
 
         chat_history_str = "\n".join(
diff --git a/templates/types/streaming/fastapi/app/api/routers/chat_config.py b/templates/types/streaming/fastapi/app/api/routers/chat_config.py
index 8d926e50..ae88ca9a 100644
--- a/templates/types/streaming/fastapi/app/api/routers/chat_config.py
+++ b/templates/types/streaming/fastapi/app/api/routers/chat_config.py
@@ -23,7 +23,7 @@ async def chat_config() -> ChatConfig:
 try:
     from app.engine.service import LLamaCloudFileService
 
-    logger.info("LlamaCloud is configured. Adding /config/llamacloud route.")
+    logger.info("LlamaCloud is configured. Adding /config/llamacloud route.")
 
     @r.get("/llamacloud")
     async def chat_llama_cloud_config():
@@ -42,7 +42,5 @@ try:
         }
 
 except ImportError:
-    logger.debug(
-        "LlamaCloud is not configured. Skipping adding /config/llamacloud route."
-    )
+    logger.debug("LlamaCloud is not configured. Skipping adding /config/llamacloud route.")
     pass
-- 
GitLab