diff --git a/.changeset/good-news-sneeze.md b/.changeset/good-news-sneeze.md
new file mode 100644
index 0000000000000000000000000000000000000000..37ef119d43832c7035d93fd5c6abaf16405a8599
--- /dev/null
+++ b/.changeset/good-news-sneeze.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Bump llama-index to 0.11.11 for the multi-agent template
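Context for the bump: somewhere between 0.11.9 and 0.11.11 (hence the pin change), `Workflow.run()` started returning a `WorkflowHandler` that is itself awaitable and carries a per-run `stream_events()`. The hunks below migrate the template from `asyncio.create_task` plus instance-level streaming to that handler. A minimal sketch of the new pattern, assuming the bumped llama-index version; `EchoWorkflow` is a toy example, not part of the template:

```python
# Minimal sketch of the handler-based Workflow API this bump targets
# (assumes the bumped llama-index version; EchoWorkflow is a toy example).
import asyncio

from llama_index.core.workflow import StartEvent, StopEvent, Workflow, step


class EchoWorkflow(Workflow):
    @step
    async def echo(self, ev: StartEvent) -> StopEvent:
        # kwargs passed to run() surface as StartEvent attributes
        return StopEvent(result=ev.input)


async def main() -> None:
    handler = EchoWorkflow().run(input="hello")  # a WorkflowHandler, no create_task
    async for event in handler.stream_events():  # events scoped to this run only
        print(type(event).__name__)
    print(await handler)  # awaiting the handler yields the final result ("hello")


if __name__ == "__main__":
    asyncio.run(main())
```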
diff --git a/templates/types/multiagent/fastapi/app/agents/multi.py b/templates/types/multiagent/fastapi/app/agents/multi.py
index adbc10f753d1d41283dfa077579682d3a2a5a807..9a04a3da89010893fe7fd53d8a2cbb574a3086bb 100644
--- a/templates/types/multiagent/fastapi/app/agents/multi.py
+++ b/templates/types/multiagent/fastapi/app/agents/multi.py
@@ -1,16 +1,14 @@
-import asyncio
 from typing import Any, List
 
-from llama_index.core.tools.types import ToolMetadata, ToolOutput
-from llama_index.core.tools.utils import create_schema_from_function
-from llama_index.core.workflow import Context, Workflow
-
+from app.agents.planner import StructuredPlannerAgent
 from app.agents.single import (
     AgentRunResult,
     ContextAwareTool,
     FunctionCallingAgent,
 )
-from app.agents.planner import StructuredPlannerAgent
+from llama_index.core.tools.types import ToolMetadata, ToolOutput
+from llama_index.core.tools.utils import create_schema_from_function
+from llama_index.core.workflow import Context, Workflow
 
 
 class AgentCallTool(ContextAwareTool):
@@ -34,11 +32,11 @@ class AgentCallTool(ContextAwareTool):
 
     # override the acall function with an extra ctx argument, needed for bubbling events to the calling agent
     async def acall(self, ctx: Context, input: str) -> ToolOutput:
-        task = asyncio.create_task(self.agent.run(input=input))
+        handler = self.agent.run(input=input)
         # bubble all events while running the agent to the calling agent
-        async for ev in self.agent.stream_events():
+        async for ev in handler.stream_events():
             ctx.write_event_to_stream(ev)
-        ret: AgentRunResult = await task
+        ret: AgentRunResult = await handler
         response = ret.response.message.content
         return ToolOutput(
             content=str(response),
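The substantive change in `acall` is not just dropping `create_task`: events now stream from the handler for this specific run rather than from the shared agent instance, so concurrent calls into sub-agents cannot interleave or swallow each other's events. A hedged sketch of that property; `agent_a`, `agent_b`, and `ctx` are stand-ins for the template's objects:

```python
# Sketch: per-run handlers keep concurrent sub-agent streams separate
# (agent_a, agent_b, and ctx are hypothetical stand-ins for template objects).
import asyncio


async def bubble(agent, ctx, input: str):
    handler = agent.run(input=input)          # handler owns this run's event stream
    async for ev in handler.stream_events():  # only this run's events
        ctx.write_event_to_stream(ev)
    return await handler


async def run_both(agent_a, agent_b, ctx):
    # With instance-level stream_events(), two concurrent runs could mix
    # each other's events; per-run handlers avoid that.
    return await asyncio.gather(
        bubble(agent_a, ctx, "sub-task A"),
        bubble(agent_b, ctx, "sub-task B"),
    )
```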
diff --git a/templates/types/multiagent/fastapi/app/agents/planner.py b/templates/types/multiagent/fastapi/app/agents/planner.py
index 8a72def6559f3add69ec05461b6b8a5c7c0243a1..c81944e18f4faa427e17cb51c52f407f413921cd 100644
--- a/templates/types/multiagent/fastapi/app/agents/planner.py
+++ b/templates/types/multiagent/fastapi/app/agents/planner.py
@@ -1,8 +1,8 @@
-import asyncio
 import uuid
 from enum import Enum
 from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, Union
 
+from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
 from llama_index.core.agent.runner.planner import (
     DEFAULT_INITIAL_PLAN_PROMPT,
     DEFAULT_PLAN_REFINE_PROMPT,
@@ -24,8 +24,6 @@ from llama_index.core.workflow import (
     step,
 )
 
-from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
-
 
 class ExecutePlanEvent(Event):
     pass
@@ -125,16 +123,14 @@ class StructuredPlannerAgent(Workflow):
         is_last_tasks = ctx.data["num_sub_tasks"] == self.get_remaining_subtasks(ctx)
         # TODO: streaming only works without plan refining
         streaming = is_last_tasks and ctx.data["streaming"] and not self.refine_plan
-        task = asyncio.create_task(
-            self.executor.run(
-                input=ev.sub_task.input,
-                streaming=streaming,
-            )
+        handler = self.executor.run(
+            input=ev.sub_task.input,
+            streaming=streaming,
         )
         # bubble all events while running the executor to the planner
-        async for event in self.executor.stream_events():
+        async for event in handler.stream_events():
             ctx.write_event_to_stream(event)
-        result = await task
+        result: AgentRunResult = await handler
         if self._verbose:
             print("=== Done executing sub task ===\n")
         self.planner.state.add_completed_sub_task(ctx.data["act_plan_id"], ev.sub_task)
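Same handler migration as in `multi.py`; the only addition is the explicit `result: AgentRunResult` annotation, which documents what awaiting the executor's handler resolves to now that the intermediate `asyncio.Task` is gone.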
diff --git a/templates/types/multiagent/fastapi/app/api/routers/chat.py b/templates/types/multiagent/fastapi/app/api/routers/chat.py
index beeb724edbcf9cc0a109d5ba5f82f4408c67d9fe..2b7a5636ffa29ed3f758cd8ded9c6f0897f06f80 100644
--- a/templates/types/multiagent/fastapi/app/api/routers/chat.py
+++ b/templates/types/multiagent/fastapi/app/api/routers/chat.py
@@ -1,14 +1,12 @@
-import asyncio
 import logging
 
-from fastapi import APIRouter, HTTPException, Request, status
-from llama_index.core.workflow import Workflow
-
-from app.examples.factory import create_agent
 from app.api.routers.models import (
     ChatData,
 )
 from app.api.routers.vercel_response import VercelStreamResponse
+from app.examples.factory import create_agent
+from fastapi import APIRouter, HTTPException, Request, status
+from llama_index.core.workflow import Workflow
 
 chat_router = r = APIRouter()
 
@@ -30,11 +28,9 @@ async def chat(
         # params = data.data or {}
 
         agent: Workflow = create_agent(chat_history=messages)
-        task = asyncio.create_task(
-            agent.run(input=last_message_content, streaming=True)
-        )
+        handler = agent.run(input=last_message_content, streaming=True)
 
-        return VercelStreamResponse(request, task, agent.stream_events, data)
+        return VercelStreamResponse(request, handler, agent.stream_events, data)
     except Exception as e:
         logger.exception("Error in agent", exc_info=True)
         raise HTTPException(
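Note that the route still passes the instance-level `agent.stream_events` callable into `VercelStreamResponse`; only the awaited object switched from a task to the handler. A hedged sketch of how a streaming response might consume such a handler (illustrative only; `encode_chunk` is hypothetical, and `VercelStreamResponse`'s real internals are not in this diff):

```python
# Illustrative consumer of a WorkflowHandler inside a streaming response
# (encode_chunk is a hypothetical serializer; the template's actual wire
# format lives in VercelStreamResponse, which this diff does not touch).
from typing import Any, AsyncGenerator


def encode_chunk(obj: Any) -> str:
    # Stand-in serialization; real code would emit the Vercel data-stream format.
    return f"data: {obj!r}\n\n"


async def stream_run(handler) -> AsyncGenerator[str, None]:
    async for event in handler.stream_events():  # forward intermediate events
        yield encode_chunk(event)
    result = await handler                       # then resolve the final result
    yield encode_chunk(result)
```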
diff --git a/templates/types/multiagent/fastapi/app/examples/workflow.py b/templates/types/multiagent/fastapi/app/examples/workflow.py
index 731b30335c87c14a637ea61ecdce4837144ef9ce..ba62644319419feaa113ae103bf1111e9089d65b 100644
--- a/templates/types/multiagent/fastapi/app/examples/workflow.py
+++ b/templates/types/multiagent/fastapi/app/examples/workflow.py
@@ -1,7 +1,8 @@
-import asyncio
 from typing import AsyncGenerator, List, Optional
 
-
+from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
+from app.examples.researcher import create_researcher
+from llama_index.core.chat_engine.types import ChatMessage
 from llama_index.core.workflow import (
     Context,
     Event,
@@ -10,9 +11,6 @@ from llama_index.core.workflow import (
     Workflow,
     step,
 )
-from llama_index.core.chat_engine.types import ChatMessage
-from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
-from app.examples.researcher import create_researcher
 
 
 def create_workflow(chat_history: Optional[List[ChatMessage]] = None):
@@ -132,8 +130,8 @@ Review:
         input: str,
         streaming: bool = False,
     ) -> AgentRunResult | AsyncGenerator:
-        task = asyncio.create_task(agent.run(input=input, streaming=streaming))
+        handler = agent.run(input=input, streaming=streaming)
         # bubble all events from the running agent to the calling workflow
-        async for event in agent.stream_events():
+        async for event in handler.stream_events():
             ctx.write_event_to_stream(event)
-        return await task
+        return await handler
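`run_agent` is the same migration a third time; its `AgentRunResult | AsyncGenerator` return type is unchanged and reflects that, with `streaming=True`, awaiting the handler presumably resolves to an async generator rather than a final result.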
diff --git a/templates/types/multiagent/fastapi/pyproject.toml b/templates/types/multiagent/fastapi/pyproject.toml
index a553f9f109fa287875f61570dc3761190ebb9518..5e5f6a98be17476170cbaa6c7077572402bd0b56 100644
--- a/templates/types/multiagent/fastapi/pyproject.toml
+++ b/templates/types/multiagent/fastapi/pyproject.toml
@@ -12,8 +12,7 @@ generate = "app.engine.generate:generate_datasource"
 [tool.poetry.dependencies]
 python = "^3.11"
 llama-index-agent-openai = ">=0.3.0,<0.4.0"
-llama-index = "0.11.9"
-llama-index-core = "0.11.9"
+llama-index = "0.11.11"
 fastapi = "^0.112.2"
 python-dotenv = "^1.0.0"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
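The dependency change also drops the separate `llama-index-core = "0.11.9"` pin rather than bumping it in lockstep: pinning only the `llama-index` meta-package lets it resolve a compatible core transitively, so the two pins can no longer drift apart. A quick way to confirm what got resolved after installing (standard-library `importlib.metadata`; names are the PyPI distributions):

```python
# Verify the resolved versions after `poetry install`
# (importlib.metadata is standard library; names are the PyPI distributions).
from importlib.metadata import version

print(version("llama-index"))       # expect 0.11.11, per the pin above
print(version("llama-index-core"))  # now resolved transitively by llama-index
```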