diff --git a/.changeset/shy-horses-think.md b/.changeset/shy-horses-think.md
new file mode 100644
index 0000000000000000000000000000000000000000..576b92d44f0670c476def6c9715206b90dbdf1f9
--- /dev/null
+++ b/.changeset/shy-horses-think.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Add a new template for a multi-agent app
diff --git a/helpers/env-variables.ts b/helpers/env-variables.ts
index 6e19a11704341c3b2d1d9c257bf63d3a6b80b8fb..b6f7af862b4e9e146bb41549069608ad94213dba 100644
--- a/helpers/env-variables.ts
+++ b/helpers/env-variables.ts
@@ -5,6 +5,7 @@ import {
   ModelConfig,
   TemplateDataSource,
   TemplateFramework,
+  TemplateType,
   TemplateVectorDB,
 } from "./types";
 
@@ -378,6 +379,36 @@ const getSystemPromptEnv = (tools?: Tool[]): EnvVar => {
   };
 };
 
+const getTemplateEnvs = (template?: TemplateType): EnvVar[] => {
+  if (template === "multiagent") {
+    return [
+      {
+        name: "MESSAGE_QUEUE_PORT",
+      },
+      {
+        name: "CONTROL_PLANE_PORT",
+      },
+      {
+        name: "HUMAN_CONSUMER_PORT",
+      },
+      {
+        name: "AGENT_QUERY_ENGINE_PORT",
+        value: "8003",
+      },
+      {
+        name: "AGENT_QUERY_ENGINE_DESCRIPTION",
+        value: "Query information from the provided data",
+      },
+      {
+        name: "AGENT_DUMMY_PORT",
+        value: "8004",
+      },
+    ];
+  } else {
+    return [];
+  }
+};
+
 export const createBackendEnvFile = async (
   root: string,
   opts: {
@@ -386,6 +417,7 @@ export const createBackendEnvFile = async (
     modelConfig: ModelConfig;
     framework: TemplateFramework;
     dataSources?: TemplateDataSource[];
+    template?: TemplateType;
     port?: number;
     tools?: Tool[];
   },
@@ -406,6 +438,8 @@ export const createBackendEnvFile = async (
     ...getVectorDBEnvs(opts.vectorDb, opts.framework),
     ...getFrameworkEnvs(opts.framework, opts.port),
     ...getToolEnvs(opts.tools),
+    // Add template-specific environment variables
+    ...getTemplateEnvs(opts.template),
     getSystemPromptEnv(opts.tools),
   ];
   // Render and write env file
diff --git a/helpers/index.ts b/helpers/index.ts
index e65998ba871d125d87eda0d1ef40549a10ee5294..c88e364313a8a80b9c8e8b634ba5a34d1da9d1c5 100644
--- a/helpers/index.ts
+++ b/helpers/index.ts
@@ -141,15 +141,18 @@ export const installTemplate = async (
     // This is a backend, so we need to copy the test data and create the env file.
 
     // Copy the environment file to the target directory.
-    await createBackendEnvFile(props.root, {
-      modelConfig: props.modelConfig,
-      llamaCloudKey: props.llamaCloudKey,
-      vectorDb: props.vectorDb,
-      framework: props.framework,
-      dataSources: props.dataSources,
-      port: props.externalPort,
-      tools: props.tools,
-    });
+    if (props.template === "streaming" || props.template === "multiagent") {
+      await createBackendEnvFile(props.root, {
+        modelConfig: props.modelConfig,
+        llamaCloudKey: props.llamaCloudKey,
+        vectorDb: props.vectorDb,
+        framework: props.framework,
+        dataSources: props.dataSources,
+        port: props.externalPort,
+        tools: props.tools,
+        template: props.template,
+      });
+    }
 
     if (props.dataSources.length > 0) {
       console.log("\nGenerating context data...\n");
diff --git a/helpers/python.ts b/helpers/python.ts
index 64faa4a00b2355f5c1f7e108e355f1d0c506eb7d..04f6a11a39efcebcdd39ba4854960c7e287d578d 100644
--- a/helpers/python.ts
+++ b/helpers/python.ts
@@ -320,20 +320,27 @@ export const installPythonTemplate = async ({
     cwd: path.join(compPath, "loaders", "python"),
   });
 
-  // Select and copy engine code based on data sources and tools
-  let engine;
-  tools = tools ?? [];
-  if (dataSources.length > 0 && tools.length === 0) {
-    console.log("\nNo tools selected - use optimized context chat engine\n");
-    engine = "chat";
-  } else {
-    engine = "agent";
-  }
-  await copy("**", enginePath, {
-    parents: true,
-    cwd: path.join(compPath, "engines", "python", engine),
+  // Copy settings.py to app
+  await copy("**", path.join(root, "app"), {
+    cwd: path.join(compPath, "settings", "python"),
   });
 
+  if (template === "streaming") {
+    // For the streaming template only:
+    // Select and copy engine code based on data sources and tools
+    let engine;
+    if (dataSources.length > 0 && (!tools || tools.length === 0)) {
+      console.log("\nNo tools selected - use optimized context chat engine\n");
+      engine = "chat";
+    } else {
+      engine = "agent";
+    }
+    await copy("**", enginePath, {
+      parents: true,
+      cwd: path.join(compPath, "engines", "python", engine),
+    });
+  }
+
   console.log("Adding additional dependencies");
 
   const addOnDependencies = getAdditionalDependencies(
diff --git a/helpers/types.ts b/helpers/types.ts
index 16debfc7bdbe8aa194ef97f7c70a907dcb070cfc..12c620198a788d36515979f6010dc807c395e014 100644
--- a/helpers/types.ts
+++ b/helpers/types.ts
@@ -16,7 +16,11 @@ export type ModelConfig = {
   dimensions: number;
   isConfigured(): boolean;
 };
-export type TemplateType = "streaming" | "community" | "llamapack";
+export type TemplateType =
+  | "streaming"
+  | "community"
+  | "llamapack"
+  | "multiagent";
 export type TemplateFramework = "nextjs" | "express" | "fastapi";
 export type TemplateUI = "html" | "shadcn";
 export type TemplateVectorDB =
diff --git a/questions.ts b/questions.ts
index d220090965ed3aeb3210ae7b7e8e96813581bd81..22811428a162c5f247ff6071ec7c7a71424f4aaa 100644
--- a/questions.ts
+++ b/questions.ts
@@ -9,6 +9,7 @@ import {
   TemplateDataSource,
   TemplateDataSourceType,
   TemplateFramework,
+  TemplateType,
 } from "./helpers";
 import { COMMUNITY_OWNER, COMMUNITY_REPO } from "./helpers/constant";
 import { EXAMPLE_FILE } from "./helpers/datasources";
@@ -122,6 +123,7 @@ const getVectorDbChoices = (framework: TemplateFramework) => {
 export const getDataSourceChoices = (
   framework: TemplateFramework,
   selectedDataSource: TemplateDataSource[],
+  template?: TemplateType,
 ) => {
   // If LlamaCloud is already selected, don't show any other options
   if (selectedDataSource.find((s) => s.type === "llamacloud")) {
@@ -137,10 +139,12 @@ export const getDataSourceChoices = (
     });
   }
   if (selectedDataSource === undefined || selectedDataSource.length === 0) {
-    choices.push({
-      title: "No data, just a simple chat or agent",
-      value: "none",
-    });
+    if (template !== "multiagent") {
+      choices.push({
+        title: "No data, just a simple chat or agent",
+        value: "none",
+      });
+    }
     choices.push({
       title:
         process.platform !== "linux"
@@ -281,25 +285,27 @@ export const askQuestions = async (
           },
         ];
 
-        const modelConfigured =
-          !program.llamapack && program.modelConfig.isConfigured();
-        // If using LlamaParse, require LlamaCloud API key
-        const llamaCloudKeyConfigured = program.useLlamaParse
-          ? program.llamaCloudKey || process.env["LLAMA_CLOUD_API_KEY"]
-          : true;
-        const hasVectorDb = program.vectorDb && program.vectorDb !== "none";
-        // Can run the app if all tools do not require configuration
-        if (
-          !hasVectorDb &&
-          modelConfigured &&
-          llamaCloudKeyConfigured &&
-          !toolsRequireConfig(program.tools)
-        ) {
-          actionChoices.push({
-            title:
-              "Generate code, install dependencies, and run the app (~2 min)",
-            value: "runApp",
-          });
+        if (program.template !== "multiagent") {
+          const modelConfigured =
+            !program.llamapack && program.modelConfig.isConfigured();
+          // If using LlamaParse, require LlamaCloud API key
+          const llamaCloudKeyConfigured = program.useLlamaParse
+            ? program.llamaCloudKey || process.env["LLAMA_CLOUD_API_KEY"]
+            : true;
+          const hasVectorDb = program.vectorDb && program.vectorDb !== "none";
+          // Can run the app if all tools do not require configuration
+          if (
+            !hasVectorDb &&
+            modelConfigured &&
+            llamaCloudKeyConfigured &&
+            !toolsRequireConfig(program.tools)
+          ) {
+            actionChoices.push({
+              title:
+                "Generate code, install dependencies, and run the app (~2 min)",
+              value: "runApp",
+            });
+          }
         }
 
         const { action } = await prompts(
@@ -331,7 +337,11 @@ export const askQuestions = async (
           name: "template",
           message: "Which template would you like to use?",
           choices: [
-            { title: "Chat", value: "streaming" },
+            { title: "Agentic RAG (single agent)", value: "streaming" },
+            {
+              title: "Multi-agent app (using llama-agents)",
+              value: "multiagent",
+            },
             {
               title: `Community template from ${styledRepo}`,
               value: "community",
@@ -395,6 +405,10 @@ export const askQuestions = async (
     return; // early return - no further questions needed for llamapack projects
   }
 
+  if (program.template === "multiagent") {
+    // TODO: the multi-agent template currently only supports FastAPI
+    program.framework = preferences.framework = "fastapi";
+  }
   if (!program.framework) {
     if (ciInfo.isCI) {
       program.framework = getPrefOrDefault("framework");
@@ -420,7 +434,10 @@ export const askQuestions = async (
     }
   }
 
-  if (program.framework === "express" || program.framework === "fastapi") {
+  if (
+    (program.framework === "express" || program.framework === "fastapi") &&
+    program.template === "streaming"
+  ) {
     // if a backend-only framework is selected, ask whether we should create a frontend
     if (program.frontend === undefined) {
       if (ciInfo.isCI) {
@@ -457,7 +474,7 @@ export const askQuestions = async (
     }
   }
 
-  if (!program.observability) {
+  if (!program.observability && program.template === "streaming") {
     if (ciInfo.isCI) {
       program.observability = getPrefOrDefault("observability");
     } else {
@@ -501,6 +518,7 @@ export const askQuestions = async (
         const choices = getDataSourceChoices(
           program.framework,
           program.dataSources,
+          program.template,
         );
         if (choices.length === 0) break;
         const { selectedSource } = await prompts(
@@ -695,7 +713,8 @@ export const askQuestions = async (
     }
   }
 
-  if (!program.tools) {
+  if (!program.tools && program.template === "streaming") {
+    // TODO: allow selecting tools for the multi-agent template as well
     if (ciInfo.isCI) {
       program.tools = getPrefOrDefault("tools");
     } else {
diff --git a/templates/components/settings/python/llmhub.py b/templates/components/settings/python/llmhub.py
new file mode 100644
index 0000000000000000000000000000000000000000..69e0e324971a0ea68c001e15e88320124483a80a
--- /dev/null
+++ b/templates/components/settings/python/llmhub.py
@@ -0,0 +1,61 @@
+from llama_index.embeddings.openai import OpenAIEmbedding
+from llama_index.core.settings import Settings
+from typing import Dict
+import os
+
+DEFAULT_MODEL = "gpt-3.5-turbo"
+DEFAULT_EMBEDDING_MODEL = "text-embedding-3-large"
+
+class TSIEmbedding(OpenAIEmbedding):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._query_engine = self._text_engine = self.model_name
+
+def llm_config_from_env() -> Dict:
+    from llama_index.core.constants import DEFAULT_TEMPERATURE
+
+    model = os.getenv("MODEL", DEFAULT_MODEL)
+    temperature = os.getenv("LLM_TEMPERATURE", DEFAULT_TEMPERATURE)
+    max_tokens = os.getenv("LLM_MAX_TOKENS")
+    api_key = os.getenv("T_SYSTEMS_LLMHUB_API_KEY")
+    api_base = os.getenv("T_SYSTEMS_LLMHUB_BASE_URL")
+
+    config = {
+        "model": model,
+        "api_key": api_key,
+        "api_base": api_base,
+        "temperature": float(temperature),
+        "max_tokens": int(max_tokens) if max_tokens is not None else None,
+    }
+    return config
+
+
+def embedding_config_from_env() -> Dict:
+    from llama_index.core.constants import DEFAULT_EMBEDDING_DIM
+    
+    model = os.getenv("EMBEDDING_MODEL", DEFAULT_EMBEDDING_MODEL)
+    dimension = os.getenv("EMBEDDING_DIM", DEFAULT_EMBEDDING_DIM)
+    api_key = os.getenv("T_SYSTEMS_LLMHUB_API_KEY")
+    api_base = os.getenv("T_SYSTEMS_LLMHUB_BASE_URL")
+
+    config = {
+        "model_name": model,
+        "dimension": int(dimension) if dimension is not None else None,
+        "api_key": api_key,
+        "api_base": api_base,
+    }
+    return config
+
+def init_llmhub():
+    from llama_index.llms.openai_like import OpenAILike
+
+    llm_configs = llm_config_from_env()
+    embedding_configs = embedding_config_from_env()
+
+    Settings.embed_model = TSIEmbedding(**embedding_configs)
+    Settings.llm = OpenAILike(
+        **llm_configs,
+        is_chat_model=True,
+        is_function_calling_model=False,
+        context_window=4096,
+    )
\ No newline at end of file
diff --git a/templates/types/streaming/fastapi/app/settings.py b/templates/components/settings/python/settings.py
similarity index 99%
rename from templates/types/streaming/fastapi/app/settings.py
rename to templates/components/settings/python/settings.py
index 7ec219755c9166b13c08cbe41d6856025a244983..2f7e4b3a37cad24249b528ef45f1a8371eb46ba1 100644
--- a/templates/types/streaming/fastapi/app/settings.py
+++ b/templates/components/settings/python/settings.py
@@ -2,7 +2,7 @@ import os
 from typing import Dict
 
 from llama_index.core.settings import Settings
-from .llmhub import init_llmhub
+
 
 def init_settings():
     model_provider = os.getenv("MODEL_PROVIDER")
@@ -20,6 +20,8 @@ def init_settings():
         case "azure-openai":
             init_azure_openai()
         case "t-systems":
+            from .llmhub import init_llmhub
+
             init_llmhub()
         case _:
             raise ValueError(f"Invalid model provider: {model_provider}")
@@ -147,5 +149,3 @@ def init_gemini():
 
     Settings.llm = Gemini(model=model_name)
     Settings.embed_model = GeminiEmbedding(model_name=embed_model_name)
-
-
diff --git a/templates/types/multiagent/fastapi/README-template.md b/templates/types/multiagent/fastapi/README-template.md
new file mode 100644
index 0000000000000000000000000000000000000000..b75c3c6b4fd6e664f7076311f95fff46d3f0d087
--- /dev/null
+++ b/templates/types/multiagent/fastapi/README-template.md
@@ -0,0 +1,50 @@
+This is a [LlamaIndex](https://www.llamaindex.ai/) project using [FastAPI](https://fastapi.tiangolo.com/) bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).
+
+## Getting Started
+
+First, set up the environment with Poetry:
+
+> **_Note:_** This step is not needed if you are using the dev-container.
+
+```shell
+poetry install
+poetry shell
+```
+
+Then, check the parameters that have been pre-configured in the `.env` file in this directory (e.g., you might need to configure an `OPENAI_API_KEY` if you're using OpenAI as your model provider).
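+
+As a reference, for the OpenAI provider the relevant entries might look like the following (the values are placeholders; the exact variables depend on the model provider you selected when generating the project):
+
+```shell
+MODEL_PROVIDER=openai
+MODEL=gpt-3.5-turbo
+EMBEDDING_MODEL=text-embedding-3-large
+OPENAI_API_KEY=<your-openai-api-key>
+```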
+
+Next, generate the embeddings for the documents in the `./data` directory (if this folder exists; otherwise, skip this step):
+
+```shell
+poetry run generate
+```
+
+Finally, run all the services with a single command:
+
+```shell
+poetry run python main.py
+```
+
+You can monitor and test the agent services with the `llama-agents` monitor TUI:
+
+```shell
+poetry run llama-agents monitor --control-plane-url http://127.0.0.1:8001
+```
+
+## Services
+
+- Message queue (port 8000): Exchanges messages between the services.
+- Control plane (port 8001): A gateway for managing tasks and services.
+- Human consumer (port 8002): Handles the result when a task is completed.
+- Agent service `query_engine` (port 8003): An agent that queries information from the configured LlamaIndex index.
+- Agent service `dummy_agent` (port 8004): A dummy agent that does nothing; a good starting point for adding more agents.
+
+The ports listed above are the defaults; you can change them in the `.env` file, as shown in the example below.
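+
+For example, to run the services on a different port range, you could override the defaults with hypothetical values like these:
+
+```shell
+MESSAGE_QUEUE_PORT=9000
+CONTROL_PLANE_PORT=9001
+HUMAN_CONSUMER_PORT=9002
+AGENT_QUERY_ENGINE_PORT=9003
+AGENT_DUMMY_PORT=9004
+```
+
+If you change `CONTROL_PLANE_PORT`, remember to use the new port in the `--control-plane-url` option of the monitor command above.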
+
+## Learn More
+
+To learn more about LlamaIndex, take a look at the following resources:
+
+- [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex.
+
+You can check out [the LlamaIndex GitHub repository](https://github.com/run-llama/llama_index) - your feedback and contributions are welcome!
diff --git a/templates/types/multiagent/fastapi/app/agents/dummy/agent.py b/templates/types/multiagent/fastapi/app/agents/dummy/agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..dde7fa3835e08eb777aa790189bc1fc72931ab6a
--- /dev/null
+++ b/templates/types/multiagent/fastapi/app/agents/dummy/agent.py
@@ -0,0 +1,33 @@
+from llama_agents import AgentService, SimpleMessageQueue
+from llama_index.core.agent import FunctionCallingAgentWorker
+from llama_index.core.tools import FunctionTool
+from llama_index.core.settings import Settings
+from app.utils import load_from_env
+
+
+DEFAULT_DUMMY_AGENT_DESCRIPTION = "I'm a dummy agent which does nothing."
+
+
+def dummy_function():
+    """
+    This function does nothing.
+    """
+    return ""
+
+
+def init_dummy_agent(message_queue: SimpleMessageQueue) -> AgentService:
+    agent = FunctionCallingAgentWorker(
+        tools=[FunctionTool.from_defaults(fn=dummy_function)],
+        llm=Settings.llm,
+        prefix_messages=[],
+    ).as_agent()
+
+    return AgentService(
+        service_name="dummy_agent",
+        agent=agent,
+        message_queue=message_queue.client,
+        description=load_from_env("AGENT_DUMMY_DESCRIPTION", throw_error=False)
+        or DEFAULT_DUMMY_AGENT_DESCRIPTION,
+        host=load_from_env("AGENT_DUMMY_HOST", throw_error=False) or "127.0.0.1",
+        port=int(load_from_env("AGENT_DUMMY_PORT")),
+    )
diff --git a/templates/types/multiagent/fastapi/app/agents/query_engine/agent.py b/templates/types/multiagent/fastapi/app/agents/query_engine/agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..bee1f017e901e5b8290af08272165810d619be5f
--- /dev/null
+++ b/templates/types/multiagent/fastapi/app/agents/query_engine/agent.py
@@ -0,0 +1,52 @@
+import os
+from llama_agents import AgentService, SimpleMessageQueue
+from llama_index.core.agent import FunctionCallingAgentWorker
+from llama_index.core.tools import QueryEngineTool, ToolMetadata
+from llama_index.core.settings import Settings
+from app.engine.index import get_index
+from app.utils import load_from_env
+
+
+DEFAULT_QUERY_ENGINE_AGENT_DESCRIPTION = (
+    "Used to answer the questions using the provided context data."
+)
+
+
+def get_query_engine_tool() -> QueryEngineTool:
+    """
+    Provide a query engine tool that can be used to query the index.
+    """
+    index = get_index()
+    if index is None:
+        raise ValueError("Index not found. Please create an index first.")
+    query_engine = index.as_query_engine(similarity_top_k=int(os.getenv("TOP_K", 3)))
+    return QueryEngineTool(
+        query_engine=query_engine,
+        metadata=ToolMetadata(
+            name="context_data",
+            description="""
+                Provides information from the provided context data.
+                Use a detailed plain text question as input to the tool.
+            """,
+        ),
+    )
+
+
+def init_query_engine_agent(
+    message_queue: SimpleMessageQueue,
+) -> AgentService:
+    """
+    Initialize the agent service.
+    """
+    agent = FunctionCallingAgentWorker(
+        tools=[get_query_engine_tool()], llm=Settings.llm, prefix_messages=[]
+    ).as_agent()
+    return AgentService(
+        service_name="context_query_agent",
+        agent=agent,
+        message_queue=message_queue.client,
+        description=load_from_env("AGENT_QUERY_ENGINE_DESCRIPTION", throw_error=False)
+        or DEFAULT_QUERY_ENGINE_AGENT_DESCRIPTION,
+        host=load_from_env("AGENT_QUERY_ENGINE_HOST", throw_error=False) or "127.0.0.1",
+        port=int(load_from_env("AGENT_QUERY_ENGINE_PORT")),
+    )
diff --git a/templates/types/multiagent/fastapi/app/core/control_plane.py b/templates/types/multiagent/fastapi/app/core/control_plane.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ac3a1e34e3a36aef887e189dd48dd566b8c16c2
--- /dev/null
+++ b/templates/types/multiagent/fastapi/app/core/control_plane.py
@@ -0,0 +1,19 @@
+from llama_index.llms.openai import OpenAI
+from llama_agents import AgentOrchestrator, ControlPlaneServer
+from app.core.message_queue import message_queue
+from app.utils import load_from_env
+
+
+control_plane_host = (
+    load_from_env("CONTROL_PLANE_HOST", throw_error=False) or "127.0.0.1"
+)
+control_plane_port = load_from_env("CONTROL_PLANE_PORT", throw_error=False) or "8001"
+
+
+# setup control plane
+control_plane = ControlPlaneServer(
+    message_queue=message_queue,
+    orchestrator=AgentOrchestrator(llm=OpenAI()),
+    host=control_plane_host,
+    port=int(control_plane_port) if control_plane_port else None,
+)
diff --git a/templates/types/multiagent/fastapi/app/core/message_queue.py b/templates/types/multiagent/fastapi/app/core/message_queue.py
new file mode 100644
index 0000000000000000000000000000000000000000..cebca4454a10bc290d5864d636f8a3cf6488b132
--- /dev/null
+++ b/templates/types/multiagent/fastapi/app/core/message_queue.py
@@ -0,0 +1,12 @@
+from llama_agents import SimpleMessageQueue
+from app.utils import load_from_env
+
+message_queue_host = (
+    load_from_env("MESSAGE_QUEUE_HOST", throw_error=False) or "127.0.0.1"
+)
+message_queue_port = load_from_env("MESSAGE_QUEUE_PORT", throw_error=False) or "8000"
+
+message_queue = SimpleMessageQueue(
+    host=message_queue_host,
+    port=int(message_queue_port) if message_queue_port else None,
+)
diff --git a/templates/types/multiagent/fastapi/app/core/task_result.py b/templates/types/multiagent/fastapi/app/core/task_result.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b0737e2f39eeb6b88f3a9664281270a4ecb885f
--- /dev/null
+++ b/templates/types/multiagent/fastapi/app/core/task_result.py
@@ -0,0 +1,88 @@
+import json
+from logging import getLogger
+from pathlib import Path
+from fastapi import FastAPI
+from typing import Dict, Optional
+from llama_agents import CallableMessageConsumer, QueueMessage
+from llama_agents.message_queues.base import BaseMessageQueue
+from llama_agents.message_consumers.base import BaseMessageQueueConsumer
+from llama_agents.message_consumers.remote import RemoteMessageConsumer
+from app.utils import load_from_env
+from app.core.message_queue import message_queue
+
+
+logger = getLogger(__name__)
+
+
+class TaskResultService:
+    def __init__(
+        self,
+        message_queue: BaseMessageQueue,
+        name: str = "human",
+        host: str = "127.0.0.1",
+        port: Optional[int] = 8002,
+    ) -> None:
+        self.name = name
+        self.host = host
+        self.port = port
+
+        self._message_queue = message_queue
+
+        # app
+        self._app = FastAPI()
+        self._app.add_api_route(
+            "/", self.home, methods=["GET"], tags=["Human Consumer"]
+        )
+        self._app.add_api_route(
+            "/process_message",
+            self.process_message,
+            methods=["POST"],
+            tags=["Human Consumer"],
+        )
+
+    @property
+    def message_queue(self) -> BaseMessageQueue:
+        return self._message_queue
+
+    def as_consumer(self, remote: bool = False) -> BaseMessageQueueConsumer:
+        if remote:
+            return RemoteMessageConsumer(
+                url=(
+                    f"http://{self.host}:{self.port}/process_message"
+                    if self.port
+                    else f"http://{self.host}/process_message"
+                ),
+                message_type=self.name,
+            )
+
+        return CallableMessageConsumer(
+            message_type=self.name,
+            handler=self.process_message,
+        )
+
+    async def process_message(self, message: QueueMessage) -> None:
+        Path("task_results").mkdir(exist_ok=True)
+        with open("task_results/task_results.json", "+a") as f:
+            json.dump(message.model_dump(), f)
+            f.write("\n")
+
+    async def home(self) -> Dict[str, str]:
+        return {"message": "hello, human."}
+
+    async def register_to_message_queue(self) -> None:
+        """Register to the message queue."""
+        await self.message_queue.register_consumer(self.as_consumer(remote=True))
+
+
+human_consumer_host = (
+    load_from_env("HUMAN_CONSUMER_HOST", throw_error=False) or "127.0.0.1"
+)
+human_consumer_port = load_from_env("HUMAN_CONSUMER_PORT", throw_error=False) or "8002"
+
+
+human_consumer_server = TaskResultService(
+    message_queue=message_queue,
+    host=human_consumer_host,
+    port=int(human_consumer_port) if human_consumer_port else None,
+    name="human",
+)
diff --git a/templates/types/multiagent/fastapi/app/utils.py b/templates/types/multiagent/fastapi/app/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ad0c2647dfcbe60d2d09acc5370f8bd0188f56a
--- /dev/null
+++ b/templates/types/multiagent/fastapi/app/utils.py
@@ -0,0 +1,8 @@
+import os
+
+
+def load_from_env(var: str, throw_error: bool = True) -> str | None:
+    res = os.getenv(var)
+    if res is None and throw_error:
+        raise ValueError(f"Missing environment variable: {var}")
+    return res
\ No newline at end of file
diff --git a/templates/types/multiagent/fastapi/main.py b/templates/types/multiagent/fastapi/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..93fb6c47454337cf33b3b67cc50c54ba5f42e54e
--- /dev/null
+++ b/templates/types/multiagent/fastapi/main.py
@@ -0,0 +1,27 @@
+from dotenv import load_dotenv
+from app.settings import init_settings
+
+load_dotenv()
+init_settings()
+
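+# Note: the imports below must come after load_dotenv() and init_settings(),
+# as these modules read environment variables at import time.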
+from llama_agents import ServerLauncher
+from app.core.message_queue import message_queue
+from app.core.control_plane import control_plane
+from app.core.task_result import human_consumer_server
+from app.agents.query_engine.agent import init_query_engine_agent
+from app.agents.dummy.agent import init_dummy_agent
+
+agents = [
+    init_query_engine_agent(message_queue),
+    init_dummy_agent(message_queue),
+]
+
+launcher = ServerLauncher(
+    agents,
+    control_plane,
+    message_queue,
+    additional_consumers=[human_consumer_server.as_consumer()],
+)
+
+if __name__ == "__main__":
+    launcher.launch_servers()
diff --git a/templates/types/multiagent/fastapi/pyproject.toml b/templates/types/multiagent/fastapi/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..72bfa47662116c2bffdf42bced324e6122a78ef4
--- /dev/null
+++ b/templates/types/multiagent/fastapi/pyproject.toml
@@ -0,0 +1,20 @@
+[tool.poetry]
+name = "app"
+version = "0.1.0"
+description = ""
+authors = ["Marcus Schiesser <mail@marcusschiesser.de>"]
+readme = "README.md"
+
+[tool.poetry.scripts]
+generate = "app.engine.generate:generate_datasource"
+
+[tool.poetry.dependencies]
+python = "^3.11"
+llama-agents = "^0.0.3"
+llama-index-agent-openai = "^0.2.7"
+llama-index-embeddings-openai = "^0.1.10"
+llama-index-llms-openai = "^0.1.23"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
\ No newline at end of file