diff --git a/.changeset/witty-hotels-do.md b/.changeset/witty-hotels-do.md
new file mode 100644
index 0000000000000000000000000000000000000000..ceb7bb9e882fc5fad1722aaa75048558dfa0fa04
--- /dev/null
+++ b/.changeset/witty-hotels-do.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Use selected LlamaCloud index in multi-agent template
diff --git a/templates/components/multiagent/python/app/api/routers/chat.py b/templates/components/multiagent/python/app/api/routers/chat.py
index e2c808282d13f2522e576eb686ec74d087004b12..314f476110254c6735825f0459a8bebf7642a012 100644
--- a/templates/components/multiagent/python/app/api/routers/chat.py
+++ b/templates/components/multiagent/python/app/api/routers/chat.py
@@ -28,8 +28,8 @@ async def chat(
         # but agent workflow does not support them yet
         # ignore chat params and use all documents for now
         # TODO: generate filters based on doc_ids
-        # TODO: use chat params
-        engine = get_chat_engine(chat_history=messages)
+        params = data.data or {}
+        engine = get_chat_engine(chat_history=messages, params=params)
 
         event_handler = engine.run(input=last_message_content, streaming=True)
         return VercelStreamResponse(
diff --git a/templates/components/multiagent/python/app/engine/engine.py b/templates/components/multiagent/python/app/engine/engine.py
index e9563975d57a61267d25be404efd6460b135f29e..506179a02cf1d596d98a08e6175a28487620da70 100644
--- a/templates/components/multiagent/python/app/engine/engine.py
+++ b/templates/components/multiagent/python/app/engine/engine.py
@@ -18,11 +18,11 @@ def get_chat_engine(
     agent_type = os.getenv("EXAMPLE_TYPE", "").lower()
     match agent_type:
         case "choreography":
-            agent = create_choreography(chat_history)
+            agent = create_choreography(chat_history, **kwargs)
         case "orchestrator":
-            agent = create_orchestrator(chat_history)
+            agent = create_orchestrator(chat_history, **kwargs)
         case _:
-            agent = create_workflow(chat_history)
+            agent = create_workflow(chat_history, **kwargs)
 
     logger.info(f"Using agent pattern: {agent_type}")
 
diff --git a/templates/components/multiagent/python/app/examples/choreography.py b/templates/components/multiagent/python/app/examples/choreography.py
index 13da60e53f820160a15ed75829c6d7b1172460df..b194c20b73b47703c0aacde618e413fa5325cb6e 100644
--- a/templates/components/multiagent/python/app/examples/choreography.py
+++ b/templates/components/multiagent/python/app/examples/choreography.py
@@ -8,8 +8,8 @@ from app.examples.researcher import create_researcher
 from llama_index.core.chat_engine.types import ChatMessage
 
 
-def create_choreography(chat_history: Optional[List[ChatMessage]] = None):
-    researcher = create_researcher(chat_history)
+def create_choreography(chat_history: Optional[List[ChatMessage]] = None, **kwargs):
+    researcher = create_researcher(chat_history, **kwargs)
     publisher = create_publisher(chat_history)
     reviewer = FunctionCallingAgent(
         name="reviewer",
@@ -21,12 +21,14 @@ def create_choreography(chat_history: Optional[List[ChatMessage]] = None):
         name="writer",
         agents=[researcher, reviewer, publisher],
         description="expert in writing blog posts, needs researched information and images to write a blog post",
-        system_prompt=dedent("""
+        system_prompt=dedent(
+            """
             You are an expert in writing blog posts. You are given a task to write a blog post. Before starting to write the post, consult the researcher agent to get the information you need. Don't make up any information yourself.
             After creating a draft for the post, send it to the reviewer agent to receive feedback and make sure to incorporate the feedback from the reviewer.
             You can consult the reviewer and researcher a maximum of two times. Your output should contain only the blog post.
             Finally, always request the publisher to create a document (PDF, HTML) and publish the blog post.
-        """),
+        """
+        ),
         # TODO: add chat_history support to AgentCallingAgent
         # chat_history=chat_history,
     )
diff --git a/templates/components/multiagent/python/app/examples/orchestrator.py b/templates/components/multiagent/python/app/examples/orchestrator.py
index 8786dcd3f9231ed98be2391dc25d610ae3a1844f..7cf74c0cb1e1209c8dfd40286f19857dc8fdeb66 100644
--- a/templates/components/multiagent/python/app/examples/orchestrator.py
+++ b/templates/components/multiagent/python/app/examples/orchestrator.py
@@ -8,28 +8,32 @@ from app.examples.researcher import create_researcher
 from llama_index.core.chat_engine.types import ChatMessage
 
 
-def create_orchestrator(chat_history: Optional[List[ChatMessage]] = None):
-    researcher = create_researcher(chat_history)
+def create_orchestrator(chat_history: Optional[List[ChatMessage]] = None, **kwargs):
+    researcher = create_researcher(chat_history, **kwargs)
     writer = FunctionCallingAgent(
         name="writer",
         description="expert in writing blog posts, need information and images to write a post",
-        system_prompt=dedent("""
+        system_prompt=dedent(
+            """
             You are an expert in writing blog posts.
             You are given a task to write a blog post. Do not make up any information yourself.
             If you don't have the necessary information to write a blog post, reply "I need information about the topic to write the blog post".
             If you need to use images, reply "I need images about the topic to write the blog post". Do not use any dummy images made up by you.
             If you have all the information needed, write the blog post.
-        """),
+        """
+        ),
         chat_history=chat_history,
     )
     reviewer = FunctionCallingAgent(
         name="reviewer",
         description="expert in reviewing blog posts, needs a written blog post to review",
-        system_prompt=dedent("""
+        system_prompt=dedent(
+            """
             You are an expert in reviewing blog posts. You are given a task to review a blog post. Review the post and fix any issues found yourself. You must output a final blog post.
             A post must include at least one valid image. If not, reply "I need images about the topic to write the blog post". An image URL starting with "example" or "your website" is not valid.
             Especially check for logical inconsistencies and proofread the post for grammar and spelling errors.
-        """),
+        """
+        ),
         chat_history=chat_history,
     )
     publisher = create_publisher(chat_history)
diff --git a/templates/components/multiagent/python/app/examples/researcher.py b/templates/components/multiagent/python/app/examples/researcher.py
index 6efa70e9edc0ab23b25e1fbe6a1738a5fddeab05..abcc2da385f19c793265780ae1480d8b1700b911 100644
--- a/templates/components/multiagent/python/app/examples/researcher.py
+++ b/templates/components/multiagent/python/app/examples/researcher.py
@@ -3,17 +3,19 @@ from textwrap import dedent
 from typing import List
 
 from app.agents.single import FunctionCallingAgent
-from app.engine.index import get_index
+from app.engine.index import IndexConfig, get_index
 from app.engine.tools import ToolFactory
 from llama_index.core.chat_engine.types import ChatMessage
 from llama_index.core.tools import QueryEngineTool, ToolMetadata
 
 
-def _create_query_engine_tool() -> QueryEngineTool:
+def _create_query_engine_tool(params=None) -> QueryEngineTool:
     """
     Provide an agent worker that can be used to query the index.
     """
-    index = get_index()
+    # Add query tool if index exists
+    index_config = IndexConfig(**(params or {}))
+    index = get_index(index_config)
     if index is None:
         return None
     top_k = int(os.getenv("TOP_K", 0))
@@ -31,13 +33,13 @@ def _create_query_engine_tool() -> QueryEngineTool:
     )
 
 
-def _get_research_tools() -> QueryEngineTool:
+def _get_research_tools(**kwargs) -> list:
     """
     Researcher take responsibility for retrieving information.
     Try init wikipedia or duckduckgo tool if available.
     """
     tools = []
-    query_engine_tool = _create_query_engine_tool()
+    query_engine_tool = _create_query_engine_tool(**kwargs)
     if query_engine_tool is not None:
         tools.append(query_engine_tool)
     researcher_tool_names = ["duckduckgo", "wikipedia.WikipediaToolSpec"]
@@ -48,16 +50,17 @@ def _get_research_tools() -> QueryEngineTool:
     return tools
 
 
-def create_researcher(chat_history: List[ChatMessage]):
+def create_researcher(chat_history: List[ChatMessage], **kwargs):
     """
     Researcher is an agent that take responsibility for using tools to complete a given task.
     """
-    tools = _get_research_tools()
+    tools = _get_research_tools(**kwargs)
     return FunctionCallingAgent(
         name="researcher",
         tools=tools,
         description="expert in retrieving any unknown content or searching for images from the internet",
-        system_prompt=dedent("""
+        system_prompt=dedent(
+            """
             You are a researcher agent. You are given a research task.
             
             If the conversation already includes the information and there is no new request for additional information from the user, you should return the appropriate content to the writer.
@@ -77,6 +80,7 @@ def create_researcher(chat_history: List[ChatMessage]):
 
             If you use the tools but don't find any related information, please return "I didn't find any new information for {the topic}." along with the content you found. Don't try to make up information yourself.
             If the request doesn't need any new information because it was in the conversation history, please return "The task doesn't need any new information. Please reuse the existing content in the conversation history."
-        """),
+        """
+        ),
         chat_history=chat_history,
     )
diff --git a/templates/components/multiagent/python/app/examples/workflow.py b/templates/components/multiagent/python/app/examples/workflow.py
index 8a3432d9467f6efeab7014b819416b4f93c714a1..89bd6dbafd5e2f7749b0e5cf2ef06b77898fe86c 100644
--- a/templates/components/multiagent/python/app/examples/workflow.py
+++ b/templates/components/multiagent/python/app/examples/workflow.py
@@ -17,9 +17,10 @@ from llama_index.core.workflow import (
 )
 
 
-def create_workflow(chat_history: Optional[List[ChatMessage]] = None):
+def create_workflow(chat_history: Optional[List[ChatMessage]] = None, **kwargs):
     researcher = create_researcher(
         chat_history=chat_history,
+        **kwargs,
     )
     publisher = create_publisher(
         chat_history=chat_history,
@@ -127,7 +128,8 @@ class BlogPostWorkflow(Workflow):
         self, input: str, chat_history: List[ChatMessage]
     ) -> str:
         prompt_template = PromptTemplate(
-            dedent("""
+            dedent(
+                """
                 You are an expert in decision-making, helping people write and publish blog posts.
                 If the user is asking for a file or to publish content, respond with 'publish'.
                 If the user requests to write or update a blog post, respond with 'not_publish'.
@@ -140,7 +142,8 @@ class BlogPostWorkflow(Workflow):
 
                 Given the chat history and the new user request, decide whether to publish based on existing information.
                 Decision (respond with either 'not_publish' or 'publish'):
-            """)
+            """
+            )
         )
 
         chat_history_str = "\n".join(
diff --git a/templates/types/streaming/fastapi/app/api/routers/chat_config.py b/templates/types/streaming/fastapi/app/api/routers/chat_config.py
index 8d926e50bab944d9baff2ae2cb681ab6d48d4cef..ae88ca9a59d72c3520ce0eb9ab5f99ef5bd4dc05 100644
--- a/templates/types/streaming/fastapi/app/api/routers/chat_config.py
+++ b/templates/types/streaming/fastapi/app/api/routers/chat_config.py
@@ -23,7 +23,7 @@ async def chat_config() -> ChatConfig:
 try:
     from app.engine.service import LLamaCloudFileService
 
     logger.info("LlamaCloud is configured. Adding /config/llamacloud route.")
 
     @r.get("/llamacloud")
     async def chat_llama_cloud_config():
@@ -42,7 +42,5 @@ try:
         }
 
 except ImportError:
-    logger.debug(
-        "LlamaCloud is not configured. Skipping adding /config/llamacloud route."
-    )
+    logger.debug("LlamaCloud is not configured. Skipping adding /config/llamacloud route.")
     pass