From 0031e674c92090b34b1b19cf8c10c7000406b0e5 Mon Sep 17 00:00:00 2001
From: Huu Le <39040748+leehuwuj@users.noreply.github.com>
Date: Mon, 23 Sep 2024 09:37:13 +0700
Subject: [PATCH] Support llama-index 0.11.11 for the multi-agent template (#305)

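llama-index 0.11.11 changes how workflows are driven: Workflow.run()
now returns a WorkflowHandler instead of a plain coroutine. The handler
is itself awaitable and exposes stream_events(), so the previous
asyncio.create_task(...) plus workflow.stream_events() pairing is
replaced with a single handler everywhere the template runs an agent.
The explicit llama-index-core pin is also dropped from pyproject.toml,
since the llama-index meta-package already pulls in a matching core
release.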
---
 .changeset/good-news-sneeze.md                   |  5 +++++
 .../types/multiagent/fastapi/app/agents/multi.py | 16 +++++++---------
 .../multiagent/fastapi/app/agents/planner.py     | 16 ++++++----------
 .../multiagent/fastapi/app/api/routers/chat.py   | 14 +++++---------
 .../multiagent/fastapi/app/examples/workflow.py  | 14 ++++++--------
 .../types/multiagent/fastapi/pyproject.toml      |  3 +--
 6 files changed, 30 insertions(+), 38 deletions(-)
 create mode 100644 .changeset/good-news-sneeze.md

diff --git a/.changeset/good-news-sneeze.md b/.changeset/good-news-sneeze.md
new file mode 100644
index 00000000..37ef119d
--- /dev/null
+++ b/.changeset/good-news-sneeze.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Bump llama-index to 0.11.11 for the multi-agent template
diff --git a/templates/types/multiagent/fastapi/app/agents/multi.py b/templates/types/multiagent/fastapi/app/agents/multi.py
index adbc10f7..9a04a3da 100644
--- a/templates/types/multiagent/fastapi/app/agents/multi.py
+++ b/templates/types/multiagent/fastapi/app/agents/multi.py
@@ -1,16 +1,14 @@
-import asyncio
 from typing import Any, List
 
-from llama_index.core.tools.types import ToolMetadata, ToolOutput
-from llama_index.core.tools.utils import create_schema_from_function
-from llama_index.core.workflow import Context, Workflow
-
+from app.agents.planner import StructuredPlannerAgent
 from app.agents.single import (
     AgentRunResult,
     ContextAwareTool,
     FunctionCallingAgent,
 )
-from app.agents.planner import StructuredPlannerAgent
+from llama_index.core.tools.types import ToolMetadata, ToolOutput
+from llama_index.core.tools.utils import create_schema_from_function
+from llama_index.core.workflow import Context, Workflow
 
 
 class AgentCallTool(ContextAwareTool):
@@ -34,11 +32,11 @@ class AgentCallTool(ContextAwareTool):
 
     # override the acall function with a ctx argument, which is needed for bubbling events
     async def acall(self, ctx: Context, input: str) -> ToolOutput:
-        task = asyncio.create_task(self.agent.run(input=input))
+        handler = self.agent.run(input=input)
         # bubble all events from the running agent up to the calling agent
-        async for ev in self.agent.stream_events():
+        async for ev in handler.stream_events():
             ctx.write_event_to_stream(ev)
-        ret: AgentRunResult = await task
+        ret: AgentRunResult = await handler
         response = ret.response.message.content
         return ToolOutput(
             content=str(response),
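
For reference, a minimal, self-contained sketch of the handler pattern
these hunks adopt; EchoWorkflow and ProgressEvent are illustrative
names, not part of the template:

    import asyncio

    from llama_index.core.workflow import (
        Context,
        Event,
        StartEvent,
        StopEvent,
        Workflow,
        step,
    )

    class ProgressEvent(Event):
        msg: str

    class EchoWorkflow(Workflow):
        @step
        async def echo(self, ctx: Context, ev: StartEvent) -> StopEvent:
            # events written to the stream surface through handler.stream_events()
            ctx.write_event_to_stream(ProgressEvent(msg=f"handling {ev.input}"))
            return StopEvent(result=str(ev.input))

    async def main():
        handler = EchoWorkflow().run(input="hello")  # a WorkflowHandler, not a coroutine
        async for ev in handler.stream_events():  # drains until the run completes
            print(type(ev).__name__)
        print(await handler)  # awaiting the handler yields the final result

    asyncio.run(main())

Because run() no longer returns a coroutine, the old
asyncio.create_task(agent.run(...)) call would raise a TypeError;
awaiting the handler directly is the supported path.
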
diff --git a/templates/types/multiagent/fastapi/app/agents/planner.py b/templates/types/multiagent/fastapi/app/agents/planner.py
index 8a72def6..c81944e1 100644
--- a/templates/types/multiagent/fastapi/app/agents/planner.py
+++ b/templates/types/multiagent/fastapi/app/agents/planner.py
@@ -1,8 +1,8 @@
-import asyncio
 import uuid
 from enum import Enum
 from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, Union
 
+from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
 from llama_index.core.agent.runner.planner import (
     DEFAULT_INITIAL_PLAN_PROMPT,
     DEFAULT_PLAN_REFINE_PROMPT,
@@ -24,8 +24,6 @@ from llama_index.core.workflow import (
     step,
 )
 
-from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
-
 
 class ExecutePlanEvent(Event):
     pass
@@ -125,16 +123,14 @@ class StructuredPlannerAgent(Workflow):
         is_last_tasks = ctx.data["num_sub_tasks"] == self.get_remaining_subtasks(ctx)
         # TODO: streaming only works without plan refining
         streaming = is_last_tasks and ctx.data["streaming"] and not self.refine_plan
-        task = asyncio.create_task(
-            self.executor.run(
-                input=ev.sub_task.input,
-                streaming=streaming,
-            )
+        handler = self.executor.run(
+            input=ev.sub_task.input,
+            streaming=streaming,
         )
         # bubble all events from the running executor up to the planner
-        async for event in self.executor.stream_events():
+        async for event in handler.stream_events():
             ctx.write_event_to_stream(event)
-        result = await task
+        result: AgentRunResult = await handler
         if self._verbose:
             print("=== Done executing sub task ===\n")
         self.planner.state.add_completed_sub_task(ctx.data["act_plan_id"], ev.sub_task)
diff --git a/templates/types/multiagent/fastapi/app/api/routers/chat.py b/templates/types/multiagent/fastapi/app/api/routers/chat.py
index beeb724e..2b7a5636 100644
--- a/templates/types/multiagent/fastapi/app/api/routers/chat.py
+++ b/templates/types/multiagent/fastapi/app/api/routers/chat.py
@@ -1,14 +1,12 @@
-import asyncio
 import logging
 
-from fastapi import APIRouter, HTTPException, Request, status
-from llama_index.core.workflow import Workflow
-
-from app.examples.factory import create_agent
 from app.api.routers.models import (
     ChatData,
 )
 from app.api.routers.vercel_response import VercelStreamResponse
+from app.examples.factory import create_agent
+from fastapi import APIRouter, HTTPException, Request, status
+from llama_index.core.workflow import Workflow
 
 chat_router = r = APIRouter()
 
@@ -30,11 +28,9 @@ async def chat(
         # params = data.data or {}
 
         agent: Workflow = create_agent(chat_history=messages)
-        task = asyncio.create_task(
-            agent.run(input=last_message_content, streaming=True)
-        )
+        handler = agent.run(input=last_message_content, streaming=True)
 
-        return VercelStreamResponse(request, task, agent.stream_events, data)
+        return VercelStreamResponse(request, handler, agent.stream_events, data)
     except Exception as e:
         logger.exception("Error in agent", exc_info=True)
         raise HTTPException(
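
VercelStreamResponse now receives the handler in place of the old task;
its internals are not part of this patch. As a hypothetical sketch of
how a handler can feed a streaming response (event_stream and the
plain-text framing are illustrative, not the template's actual wire
format):

    from fastapi.responses import StreamingResponse

    async def event_stream(handler):
        # forward workflow events as they arrive
        async for ev in handler.stream_events():
            yield f"event: {type(ev).__name__}\n"
        # once the stream is drained, the handler resolves to the final result
        result = await handler
        yield f"result: {result}\n"

    # e.g. return StreamingResponse(event_stream(handler), media_type="text/plain")
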
diff --git a/templates/types/multiagent/fastapi/app/examples/workflow.py b/templates/types/multiagent/fastapi/app/examples/workflow.py
index 731b3033..ba626443 100644
--- a/templates/types/multiagent/fastapi/app/examples/workflow.py
+++ b/templates/types/multiagent/fastapi/app/examples/workflow.py
@@ -1,7 +1,8 @@
-import asyncio
 from typing import AsyncGenerator, List, Optional
 
-
+from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
+from app.examples.researcher import create_researcher
+from llama_index.core.chat_engine.types import ChatMessage
 from llama_index.core.workflow import (
     Context,
     Event,
@@ -10,9 +11,6 @@ from llama_index.core.workflow import (
     Workflow,
     step,
 )
-from llama_index.core.chat_engine.types import ChatMessage
-from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
-from app.examples.researcher import create_researcher
 
 
 def create_workflow(chat_history: Optional[List[ChatMessage]] = None):
@@ -132,8 +130,8 @@ Review:
         input: str,
         streaming: bool = False,
     ) -> AgentRunResult | AsyncGenerator:
-        task = asyncio.create_task(agent.run(input=input, streaming=streaming))
+        handler = agent.run(input=input, streaming=streaming)
         # bubble all events from the running agent up to the workflow
-        async for event in agent.stream_events():
+        async for event in handler.stream_events():
             ctx.write_event_to_stream(event)
-        return await task
+        return await handler
diff --git a/templates/types/multiagent/fastapi/pyproject.toml b/templates/types/multiagent/fastapi/pyproject.toml
index a553f9f1..5e5f6a98 100644
--- a/templates/types/multiagent/fastapi/pyproject.toml
+++ b/templates/types/multiagent/fastapi/pyproject.toml
@@ -12,8 +12,7 @@ generate = "app.engine.generate:generate_datasource"
 [tool.poetry.dependencies]
 python = "^3.11"
 llama-index-agent-openai = ">=0.3.0,<0.4.0"
-llama-index = "0.11.9"
-llama-index-core = "0.11.9"
+llama-index = "0.11.11"
 fastapi = "^0.112.2"
 python-dotenv = "^1.0.0"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
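
After bumping the pin, regenerating the lockfile pulls in the matching
core package, e.g.:

    poetry update llama-index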
-- 
GitLab