diff --git a/.changeset/spicy-apricots-double.md b/.changeset/spicy-apricots-double.md
new file mode 100644
index 0000000000000000000000000000000000000000..24b490816383f871198b1d05f84a6fd0ee2ecbd3
--- /dev/null
+++ b/.changeset/spicy-apricots-double.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Add financial report as the default use case in the multi-agent template (Python).
diff --git a/create-app.ts b/create-app.ts
index 7e00e0bab268af6ce9e3a8a268fda80f01a6bb3d..144fdc7e207f90b6d5d2bc289d4698eb5470196e 100644
--- a/create-app.ts
+++ b/create-app.ts
@@ -41,6 +41,7 @@ export async function createApp({
   tools,
   useLlamaParse,
   observability,
+  agents,
 }: InstallAppArgs): Promise<void> {
   const root = path.resolve(appPath);
 
@@ -86,6 +87,7 @@ export async function createApp({
     tools,
     useLlamaParse,
     observability,
+    agents,
   };
 
   if (frontend) {
diff --git a/e2e/shared/multiagent_template.spec.ts b/e2e/shared/multiagent_template.spec.ts
index c330b2c95ebd2f1509121dffbb840fbc344547f5..f470b34925f674686aec2979b783e06d1e2d6a79 100644
--- a/e2e/shared/multiagent_template.spec.ts
+++ b/e2e/shared/multiagent_template.spec.ts
@@ -18,68 +18,72 @@ const templateUI: TemplateUI = "shadcn";
 const templatePostInstallAction: TemplatePostInstallAction = "runApp";
 const appType: AppType = templateFramework === "nextjs" ? "" : "--frontend";
 const userMessage = "Write a blog post about physical standards for letters";
+const templateAgents = ["financial_report", "blog"];
 
-test.describe(`Test multiagent template ${templateFramework} ${dataSource} ${templateUI} ${appType} ${templatePostInstallAction}`, async () => {
-  test.skip(
-    process.platform !== "linux" || process.env.DATASOURCE === "--no-files",
-    "The multiagent template currently only works with files. We also only run on Linux to speed up tests.",
-  );
-  let port: number;
-  let externalPort: number;
-  let cwd: string;
-  let name: string;
-  let appProcess: ChildProcess;
-  // Only test without using vector db for now
-  const vectorDb = "none";
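+// Run the whole multiagent e2e suite once per agent use case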
+for (const agents of templateAgents) {
+  test.describe(`Test multiagent template ${agents} ${templateFramework} ${dataSource} ${templateUI} ${appType} ${templatePostInstallAction}`, async () => {
+    test.skip(
+      process.platform !== "linux" || process.env.DATASOURCE === "--no-files",
+      "The multiagent template currently only works with files. We also only run on Linux to speed up tests.",
+    );
+    let port: number;
+    let externalPort: number;
+    let cwd: string;
+    let name: string;
+    let appProcess: ChildProcess;
+    // Only test without using vector db for now
+    const vectorDb = "none";
 
-  test.beforeAll(async () => {
-    port = Math.floor(Math.random() * 10000) + 10000;
-    externalPort = port + 1;
-    cwd = await createTestDir();
-    const result = await runCreateLlama({
-      cwd,
-      templateType: "multiagent",
-      templateFramework,
-      dataSource,
-      vectorDb,
-      port,
-      externalPort,
-      postInstallAction: templatePostInstallAction,
-      templateUI,
-      appType,
+    test.beforeAll(async () => {
+      port = Math.floor(Math.random() * 10000) + 10000;
+      externalPort = port + 1;
+      cwd = await createTestDir();
+      const result = await runCreateLlama({
+        cwd,
+        templateType: "multiagent",
+        templateFramework,
+        dataSource,
+        vectorDb,
+        port,
+        externalPort,
+        postInstallAction: templatePostInstallAction,
+        templateUI,
+        appType,
+        agents,
+      });
+      name = result.projectName;
+      appProcess = result.appProcess;
     });
-    name = result.projectName;
-    appProcess = result.appProcess;
-  });
 
-  test("App folder should exist", async () => {
-    const dirExists = fs.existsSync(path.join(cwd, name));
-    expect(dirExists).toBeTruthy();
-  });
+    test("App folder should exist", async () => {
+      const dirExists = fs.existsSync(path.join(cwd, name));
+      expect(dirExists).toBeTruthy();
+    });
 
-  test("Frontend should have a title", async ({ page }) => {
-    await page.goto(`http://localhost:${port}`);
-    await expect(page.getByText("Built by LlamaIndex")).toBeVisible();
-  });
+    test("Frontend should have a title", async ({ page }) => {
+      await page.goto(`http://localhost:${port}`);
+      await expect(page.getByText("Built by LlamaIndex")).toBeVisible();
+    });
 
-  test("Frontend should be able to submit a message and receive the start of a streamed response", async ({
-    page,
-  }) => {
-    await page.goto(`http://localhost:${port}`);
-    await page.fill("form textarea", userMessage);
+    test("Frontend should be able to submit a message and receive the start of a streamed response", async ({
+      page,
+    }) => {
+      await page.goto(`http://localhost:${port}`);
+      await page.fill("form textarea", userMessage);
 
-    const responsePromise = page.waitForResponse((res) =>
-      res.url().includes("/api/chat"),
-    );
+      const responsePromise = page.waitForResponse((res) =>
+        res.url().includes("/api/chat"),
+      );
 
-    await page.click("form button[type=submit]");
+      await page.click("form button[type=submit]");
 
-    const response = await responsePromise;
-    expect(response.ok()).toBeTruthy();
-  });
+      const response = await responsePromise;
+      expect(response.ok()).toBeTruthy();
+    });
 
-  // clean processes
-  test.afterAll(async () => {
-    appProcess?.kill();
+    // clean processes
+    test.afterAll(async () => {
+      appProcess?.kill();
+    });
   });
-});
+}
diff --git a/e2e/utils.ts b/e2e/utils.ts
index 44a40d11ad99a1412d5a78c25ebe7b23bf3b3b2c..a835f0f20f8699791b07f186f5f8229ed1fb2da1 100644
--- a/e2e/utils.ts
+++ b/e2e/utils.ts
@@ -34,6 +34,7 @@ export type RunCreateLlamaOptions = {
   tools?: string;
   useLlamaParse?: boolean;
   observability?: string;
+  agents?: string;
 };
 
 export async function runCreateLlama({
@@ -52,6 +53,7 @@ export async function runCreateLlama({
   tools,
   useLlamaParse,
   observability,
+  agents,
 }: RunCreateLlamaOptions): Promise<CreateLlamaResult> {
   if (!process.env.OPENAI_API_KEY || !process.env.LLAMA_CLOUD_API_KEY) {
     throw new Error(
@@ -119,6 +121,9 @@ export async function runCreateLlama({
   if (observability) {
     commandArgs.push("--observability", observability);
   }
+  if (templateType === "multiagent" && agents) {
+    commandArgs.push("--agents", agents);
+  }
 
   const command = commandArgs.join(" ");
   console.log(`running command '${command}' in ${cwd}`);
diff --git a/helpers/datasources.ts b/helpers/datasources.ts
index dc80207e43bf6061f97f4ac40d27734cba6d6b1b..80b936b0db3cb914bcff2709cadefce3626c7ed8 100644
--- a/helpers/datasources.ts
+++ b/helpers/datasources.ts
@@ -11,6 +11,25 @@ export const EXAMPLE_FILE: TemplateDataSource = {
   },
 };
 
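+// Example 10-K SEC filings (Apple and Tesla) used as default data for the financial report use case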
+export const EXAMPLE_10K_SEC_FILES: TemplateDataSource[] = [
+  {
+    type: "file",
+    config: {
+      url: new URL(
+        "https://s2.q4cdn.com/470004039/files/doc_earnings/2023/q4/filing/_10-K-Q4-2023-As-Filed.pdf",
+      ),
+    },
+  },
+  {
+    type: "file",
+    config: {
+      url: new URL(
+        "https://ir.tesla.com/_flysystem/s3/sec/000162828024002390/tsla-20231231-gen.pdf",
+      ),
+    },
+  },
+];
+
 export function getDataSources(
   files?: string,
   exampleFile?: boolean,
diff --git a/helpers/index.ts b/helpers/index.ts
index 56d07808f92480d2fe771429af73521eac52d05f..8525455d444503432babdde672a5df644073e9cd 100644
--- a/helpers/index.ts
+++ b/helpers/index.ts
@@ -96,6 +96,12 @@ async function generateContextData(
   }
 }
 
+const downloadFile = async (url: string, destPath: string) => {
+  const response = await fetch(url);
+  if (!response.ok) {
+    throw new Error(`Failed to download ${url}: ${response.status} ${response.statusText}`);
+  }
+  const fileBuffer = await response.arrayBuffer();
+  await fsExtra.writeFile(destPath, Buffer.from(fileBuffer));
+};
+
 const prepareContextData = async (
   root: string,
   dataSources: TemplateDataSource[],
@@ -103,12 +109,28 @@ const prepareContextData = async (
   await makeDir(path.join(root, "data"));
   for (const dataSource of dataSources) {
     const dataSourceConfig = dataSource?.config as FileSourceConfig;
-    // Copy local data
-    const dataPath = dataSourceConfig.path;
-
-    const destPath = path.join(root, "data", path.basename(dataPath));
-    console.log("Copying data from path:", dataPath);
-    await fsExtra.copy(dataPath, destPath);
+    // If the data source is a URL, download the file and save it to the data directory
+    if ("url" in dataSourceConfig) {
+      console.log(
+        "Downloading file from URL:",
+        dataSourceConfig.url.toString(),
+      );
+      const destPath = path.join(
+        root,
+        "data",
+        path.basename(dataSourceConfig.url.toString()),
+      );
+      await downloadFile(dataSourceConfig.url.toString(), destPath);
+    } else {
+      // Copy local data
+      console.log("Copying data from path:", dataSourceConfig.path);
+      const destPath = path.join(
+        root,
+        "data",
+        path.basename(dataSourceConfig.path),
+      );
+      await fsExtra.copy(dataSourceConfig.path, destPath);
+    }
   }
 };
 
diff --git a/helpers/python.ts b/helpers/python.ts
index 58908276bdc6ebed5355606234a1931c94364a4f..6305739a21804703baee60358b03e720ecaeebda 100644
--- a/helpers/python.ts
+++ b/helpers/python.ts
@@ -362,6 +362,7 @@ export const installPythonTemplate = async ({
   postInstallAction,
   observability,
   modelConfig,
+  agents,
 }: Pick<
   InstallTemplateArgs,
   | "root"
@@ -373,6 +374,7 @@ export const installPythonTemplate = async ({
   | "postInstallAction"
   | "observability"
   | "modelConfig"
+  | "agents"
 >) => {
   console.log("\nInitializing Python project with template:", template, "\n");
   let templatePath;
@@ -443,6 +445,24 @@ export const installPythonTemplate = async ({
       cwd: path.join(compPath, "engines", "python", engine),
     });
 
+    // Copy agent code
+    if (template === "multiagent") {
+      if (agents) {
+        await copy("**", path.join(root), {
+          parents: true,
+          cwd: path.join(compPath, "agents", "python", agents),
+          rename: assetRelocator,
+        });
+      } else {
+        console.log(
+          red(
+            "There is no agent selected for multi-agent template. Please pick an agent to use via --agents flag.",
+          ),
+        );
+        process.exit(1);
+      }
+    }
+
     // Copy router code
     await copyRouterCode(root, tools ?? []);
   }
diff --git a/helpers/types.ts b/helpers/types.ts
index 89dd96644349b5fff06494babe5699e98d2c55fd..30a2ab357601d654f4848e8c736439f6bced6c71 100644
--- a/helpers/types.ts
+++ b/helpers/types.ts
@@ -48,10 +48,15 @@ export type TemplateDataSource = {
 };
 export type TemplateDataSourceType = "file" | "web" | "db";
 export type TemplateObservability = "none" | "traceloop" | "llamatrace";
+export type TemplateAgents = "financial_report" | "blog";
 // Config for both file and folder
-export type FileSourceConfig = {
-  path: string;
-};
+export type FileSourceConfig =
+  | {
+      path: string;
+    }
+  | {
+      url: URL;
+    };
 export type WebSourceConfig = {
   baseUrl?: string;
   prefix?: string;
@@ -94,4 +99,5 @@ export interface InstallTemplateArgs {
   postInstallAction?: TemplatePostInstallAction;
   tools?: Tool[];
   observability?: TemplateObservability;
+  agents?: TemplateAgents;
 }
diff --git a/index.ts b/index.ts
index 6ee0edeb13fa3403c4f7eaf369f83e322bca6146..de7f5b6492b8d3ee39dae666e8ad760fc9db8a08 100644
--- a/index.ts
+++ b/index.ts
@@ -208,6 +208,13 @@ const program = new Command(packageJson.name)
 `,
     false,
   )
+  .option(
+    "--agents <agents>",
+    `
+
+  Select which agents to use for the multi-agent template (e.g., financial_report, blog).
+`,
+  )
   .allowUnknownOption()
   .parse(process.argv);
 
diff --git a/questions/simple.ts b/questions/simple.ts
index 29930c1aefb244bd773c843eed7a73891558213c..195e30d8c18141e6e6085b26f3003bca30429361 100644
--- a/questions/simple.ts
+++ b/questions/simple.ts
@@ -1,5 +1,5 @@
 import prompts from "prompts";
-import { EXAMPLE_FILE } from "../helpers/datasources";
+import { EXAMPLE_10K_SEC_FILES, EXAMPLE_FILE } from "../helpers/datasources";
 import { askModelConfig } from "../helpers/providers";
 import { getTools } from "../helpers/tools";
 import { ModelConfig, TemplateFramework } from "../helpers/types";
@@ -9,7 +9,7 @@ import { askPostInstallAction, questionHandlers } from "./utils";
 type AppType =
   | "rag"
   | "code_artifact"
-  | "multiagent"
+  | "financial_report_agent"
   | "extractor"
   | "data_scientist";
 
@@ -31,8 +31,11 @@ export const askSimpleQuestions = async (
       choices: [
         { title: "Agentic RAG", value: "rag" },
         { title: "Data Scientist", value: "data_scientist" },
+        {
+          title: "Financial Report Generator (using Workflows)",
+          value: "financial_report_agent",
+        },
         { title: "Code Artifact Agent", value: "code_artifact" },
-        { title: "Multi-Agent Report Gen", value: "multiagent" },
         { title: "Structured extraction", value: "extractor" },
       ],
     },
@@ -42,20 +45,25 @@ export const askSimpleQuestions = async (
   let language: TemplateFramework = "fastapi";
   let llamaCloudKey = args.llamaCloudKey;
   let useLlamaCloud = false;
+
   if (appType !== "extractor") {
-    const { language: newLanguage } = await prompts(
-      {
-        type: "select",
-        name: "language",
-        message: "What language do you want to use?",
-        choices: [
-          { title: "Python (FastAPI)", value: "fastapi" },
-          { title: "Typescript (NextJS)", value: "nextjs" },
-        ],
-      },
-      questionHandlers,
-    );
-    language = newLanguage;
+    // The default financial report use case currently only supports Python
+    // TODO: Add support for TypeScript frameworks
+    if (appType !== "financial_report_agent") {
+      const { language: newLanguage } = await prompts(
+        {
+          type: "select",
+          name: "language",
+          message: "What language do you want to use?",
+          choices: [
+            { title: "Python (FastAPI)", value: "fastapi" },
+            { title: "Typescript (NextJS)", value: "nextjs" },
+          ],
+        },
+        questionHandlers,
+      );
+      language = newLanguage;
+    }
 
     const { useLlamaCloud: newUseLlamaCloud } = await prompts(
       {
@@ -113,7 +121,10 @@ const convertAnswers = async (
   };
   const lookup: Record<
     AppType,
-    Pick<QuestionResults, "template" | "tools" | "frontend" | "dataSources"> & {
+    Pick<
+      QuestionResults,
+      "template" | "tools" | "frontend" | "dataSources" | "agents"
+    > & {
       modelConfig?: ModelConfig;
     }
   > = {
@@ -137,16 +148,13 @@ const convertAnswers = async (
       dataSources: [],
       modelConfig: MODEL_GPT4o,
     },
-    multiagent: {
+    financial_report_agent: {
       template: "multiagent",
-      tools: getTools([
-        "document_generator",
-        "wikipedia.WikipediaToolSpec",
-        "duckduckgo",
-        "img_gen",
-      ]),
+      agents: "financial_report",
+      tools: getTools(["document_generator", "interpreter"]),
+      dataSources: EXAMPLE_10K_SEC_FILES,
       frontend: true,
-      dataSources: [EXAMPLE_FILE],
+      modelConfig: MODEL_GPT4o,
     },
     extractor: {
       template: "extractor",
diff --git a/templates/components/multiagent/python/README-template.md b/templates/components/agents/python/blog/README-template.md
similarity index 89%
rename from templates/components/multiagent/python/README-template.md
rename to templates/components/agents/python/blog/README-template.md
index 761f19a4ddc2bf620c7180e8efedf9b26d53626c..162de0c8a3a2e75dfe72eb67b679965cdf45f874 100644
--- a/templates/components/multiagent/python/README-template.md
+++ b/templates/components/agents/python/blog/README-template.md
@@ -1,5 +1,3 @@
-This is a [LlamaIndex](https://www.llamaindex.ai/) multi-agents project using [Workflows](https://docs.llamaindex.ai/en/stable/understanding/workflows/).
-
 ## Overview
 
 This example is using three agents to generate a blog post:
@@ -25,7 +23,6 @@ poetry install
 ```
 
 Then check the parameters that have been pre-configured in the `.env` file in this directory. (E.g. you might need to configure an `OPENAI_API_KEY` if you're using OpenAI as the model provider).
-
 Second, generate the embeddings of the documents in the `./data` directory:
 
 ```shell
@@ -39,7 +36,6 @@ poetry run python main.py
 ```
 
 By default, the example uses the explicit workflow. You can change the example by setting the `EXAMPLE_TYPE` environment variable to `choreography` or `orchestrator`.
-
 The example provides one streaming API endpoint `/api/chat`.
 You can test the endpoint with the following curl request:
 
@@ -65,5 +61,4 @@ To learn more about LlamaIndex, take a look at the following resources:
 
 - [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex.
 - [Workflows Introduction](https://docs.llamaindex.ai/en/stable/understanding/workflows/) - learn about LlamaIndex workflows.
-
-You can check out [the LlamaIndex GitHub repository](https://github.com/run-llama/llama_index) - your feedback and contributions are welcome!
+
+You can check out [the LlamaIndex GitHub repository](https://github.com/run-llama/llama_index) - your feedback and contributions are welcome!
diff --git a/templates/components/multiagent/python/app/examples/choreography.py b/templates/components/agents/python/blog/app/agents/choreography.py
similarity index 90%
rename from templates/components/multiagent/python/app/examples/choreography.py
rename to templates/components/agents/python/blog/app/agents/choreography.py
index b194c20b73b47703c0aacde618e413fa5325cb6e..ce43388e071330678946fbf9c16363b9df616db7 100644
--- a/templates/components/multiagent/python/app/examples/choreography.py
+++ b/templates/components/agents/python/blog/app/agents/choreography.py
@@ -1,10 +1,10 @@
 from textwrap import dedent
 from typing import List, Optional
 
-from app.agents.multi import AgentCallingAgent
-from app.agents.single import FunctionCallingAgent
-from app.examples.publisher import create_publisher
-from app.examples.researcher import create_researcher
+from app.agents.publisher import create_publisher
+from app.agents.researcher import create_researcher
+from app.workflows.multi import AgentCallingAgent
+from app.workflows.single import FunctionCallingAgent
 from llama_index.core.chat_engine.types import ChatMessage
 
 
diff --git a/templates/components/multiagent/python/app/examples/orchestrator.py b/templates/components/agents/python/blog/app/agents/orchestrator.py
similarity index 90%
rename from templates/components/multiagent/python/app/examples/orchestrator.py
rename to templates/components/agents/python/blog/app/agents/orchestrator.py
index 7cf74c0cb1e1209c8dfd40286f19857dc8fdeb66..c4593ea44243df83f04fea6f6cb4fc32acd60b7f 100644
--- a/templates/components/multiagent/python/app/examples/orchestrator.py
+++ b/templates/components/agents/python/blog/app/agents/orchestrator.py
@@ -1,10 +1,10 @@
 from textwrap import dedent
 from typing import List, Optional
 
-from app.agents.multi import AgentOrchestrator
-from app.agents.single import FunctionCallingAgent
-from app.examples.publisher import create_publisher
-from app.examples.researcher import create_researcher
+from app.agents.publisher import create_publisher
+from app.agents.researcher import create_researcher
+from app.workflows.multi import AgentOrchestrator
+from app.workflows.single import FunctionCallingAgent
 from llama_index.core.chat_engine.types import ChatMessage
 
 
diff --git a/templates/components/multiagent/python/app/examples/publisher.py b/templates/components/agents/python/blog/app/agents/publisher.py
similarity index 96%
rename from templates/components/multiagent/python/app/examples/publisher.py
rename to templates/components/agents/python/blog/app/agents/publisher.py
index 2a170d88680d01b18523a5804ca78a24199fa244..5dfc6c7781d777c39181c405c98d165819baf4a1 100644
--- a/templates/components/multiagent/python/app/examples/publisher.py
+++ b/templates/components/agents/python/blog/app/agents/publisher.py
@@ -1,8 +1,8 @@
 from textwrap import dedent
 from typing import List, Tuple
 
-from app.agents.single import FunctionCallingAgent
 from app.engine.tools import ToolFactory
+from app.workflows.single import FunctionCallingAgent
 from llama_index.core.chat_engine.types import ChatMessage
 from llama_index.core.tools import FunctionTool
 
diff --git a/templates/components/multiagent/python/app/examples/researcher.py b/templates/components/agents/python/blog/app/agents/researcher.py
similarity index 98%
rename from templates/components/multiagent/python/app/examples/researcher.py
rename to templates/components/agents/python/blog/app/agents/researcher.py
index abcc2da385f19c793265780ae1480d8b1700b911..3b9ba5ed6665ff34de7a7efb109e7a12acc510a5 100644
--- a/templates/components/multiagent/python/app/examples/researcher.py
+++ b/templates/components/agents/python/blog/app/agents/researcher.py
@@ -2,9 +2,9 @@ import os
 from textwrap import dedent
 from typing import List
 
-from app.agents.single import FunctionCallingAgent
 from app.engine.index import IndexConfig, get_index
 from app.engine.tools import ToolFactory
+from app.workflows.single import FunctionCallingAgent
 from llama_index.core.chat_engine.types import ChatMessage
 from llama_index.core.tools import QueryEngineTool, ToolMetadata
 
diff --git a/templates/components/multiagent/python/app/examples/workflow.py b/templates/components/agents/python/blog/app/agents/workflow.py
similarity index 98%
rename from templates/components/multiagent/python/app/examples/workflow.py
rename to templates/components/agents/python/blog/app/agents/workflow.py
index 89bd6dbafd5e2f7749b0e5cf2ef06b77898fe86c..c7bfbe51e6370b0c1405afd4713ec5dfdfe8c275 100644
--- a/templates/components/multiagent/python/app/examples/workflow.py
+++ b/templates/components/agents/python/blog/app/agents/workflow.py
@@ -1,9 +1,9 @@
 from textwrap import dedent
 from typing import AsyncGenerator, List, Optional
 
-from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
-from app.examples.publisher import create_publisher
-from app.examples.researcher import create_researcher
+from app.agents.publisher import create_publisher
+from app.agents.researcher import create_researcher
+from app.workflows.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
 from llama_index.core.chat_engine.types import ChatMessage
 from llama_index.core.prompts import PromptTemplate
 from llama_index.core.settings import Settings
diff --git a/templates/components/multiagent/python/app/engine/engine.py b/templates/components/agents/python/blog/app/engine/engine.py
similarity index 82%
rename from templates/components/multiagent/python/app/engine/engine.py
rename to templates/components/agents/python/blog/app/engine/engine.py
index 506179a02cf1d596d98a08e6175a28487620da70..78a79c4593494c8cb4382d2760d3962234b825f4 100644
--- a/templates/components/multiagent/python/app/engine/engine.py
+++ b/templates/components/agents/python/blog/app/engine/engine.py
@@ -2,9 +2,9 @@ import logging
 import os
 from typing import List, Optional
 
-from app.examples.choreography import create_choreography
-from app.examples.orchestrator import create_orchestrator
-from app.examples.workflow import create_workflow
+from app.agents.choreography import create_choreography
+from app.agents.orchestrator import create_orchestrator
+from app.agents.workflow import create_workflow
 from llama_index.core.chat_engine.types import ChatMessage
 from llama_index.core.workflow import Workflow
 
diff --git a/templates/components/agents/python/financial_report/README-template.md b/templates/components/agents/python/financial_report/README-template.md
new file mode 100644
index 0000000000000000000000000000000000000000..ba6d24fba8d3ae42637a680222dff0bba91060c4
--- /dev/null
+++ b/templates/components/agents/python/financial_report/README-template.md
@@ -0,0 +1,53 @@
+This is a [LlamaIndex](https://www.llamaindex.ai/) multi-agent project using [Workflows](https://docs.llamaindex.ai/en/stable/understanding/workflows/).
+
+## Getting Started
+
+First, set up the environment with Poetry:
+
+> **_Note:_** This step is not needed if you are using the dev-container.
+
+```shell
+poetry install
+```
+
+Then check the parameters that have been pre-configured in the `.env` file in this directory. (E.g. you might need to configure an `OPENAI_API_KEY` if you're using OpenAI as the model provider, and an `E2B_API_KEY` for [E2B's code interpreter tool](https://e2b.dev/docs)).
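+
+For example, a minimal `.env` for this use case might look like the following sketch (placeholder values, not real keys):
+
+```
+OPENAI_API_KEY=sk-...
+E2B_API_KEY=e2b_...
+```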
+
+Second, generate the embeddings of the documents in the `./data` directory:
+
+```shell
+poetry run generate
+```
+
+Third, run the development server:
+
+```shell
+poetry run python main.py
+```
+
+The example provides one streaming API endpoint `/api/chat`.
+You can test the endpoint with the following curl request:
+
+```
+curl --location 'localhost:8000/api/chat' \
+--header 'Content-Type: application/json' \
+--data '{ "messages": [{ "role": "user", "content": "Create a report comparing the finances of Apple and Tesla" }] }'
+```
+
+You can start editing the API by modifying `app/api/routers/chat.py` or `app/agents/workflow.py`. The API auto-updates as you save the files.
+
+Open [http://localhost:8000/docs](http://localhost:8000/docs) with your browser to see the Swagger UI of the API.
+
+The API allows CORS for all origins to simplify development. You can change this behavior by setting the `ENVIRONMENT` environment variable to `prod`:
+
+```
+ENVIRONMENT=prod poetry run python main.py
+```
+
+## Learn More
+
+To learn more about LlamaIndex, take a look at the following resources:
+
+- [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex.
+- [Workflows Introduction](https://docs.llamaindex.ai/en/stable/understanding/workflows/) - learn about LlamaIndex workflows.
+
+You can check out [the LlamaIndex GitHub repository](https://github.com/run-llama/llama_index) - your feedback and contributions are welcome!
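+
+If you'd like to scaffold this use case again without the interactive prompts, the new `--agents` flag selects it; a sketch (the other flag names are assumed from the existing CLI, run `npx create-llama --help` to confirm):
+
+```shell
+npx create-llama --template multiagent --agents financial_report
+```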
diff --git a/templates/components/agents/python/financial_report/app/agents/analyst.py b/templates/components/agents/python/financial_report/app/agents/analyst.py
new file mode 100644
index 0000000000000000000000000000000000000000..f86b10d92af12d06eb8a62221659de31262e7766
--- /dev/null
+++ b/templates/components/agents/python/financial_report/app/agents/analyst.py
@@ -0,0 +1,47 @@
+from textwrap import dedent
+from typing import List, Tuple
+
+from app.engine.tools import ToolFactory
+from app.workflows.single import FunctionCallingAgent
+from llama_index.core.chat_engine.types import ChatMessage
+from llama_index.core.tools import FunctionTool
+
+
+def _get_analyst_params() -> Tuple[List[type[FunctionTool]], str, str]:
+    tools = []
+    prompt_instructions = dedent(
+        """
+        You are an expert in analyzing financial data.
+        You are given a task and a set of financial data to analyze. Your task is to analyze the financial data and return a report.
+        Your response should include a detailed analysis of the financial data, including any trends, patterns, or insights that you find.
+        Present the analysis in a textual format; including tables would be great!
+        You don't need to synthesize the data; just analyze it and provide your findings.
+        Always use the provided information; don't make up any information yourself.
+        """
+    )
+    description = "Expert in analyzing financial data"
+    configured_tools = ToolFactory.from_env(map_result=True)
+    # Check if the interpreter tool is configured
+    if "interpreter" in configured_tools.keys():
+        tools.extend(configured_tools["interpreter"])
+        prompt_instructions += dedent("""
+            You are able to visualize the financial data using the code interpreter tool.
+            It's very useful to create and include visualizations in the report (make sure you include the right code and data for the visualization).
+            Never include any code in the report, just the visualization.
+        """)
+        description += (
+            ", able to visualize the financial data using code interpreter tool."
+        )
+    return tools, prompt_instructions, description
+
+
+def create_analyst(chat_history: List[ChatMessage]):
+    tools, prompt_instructions, description = _get_analyst_params()
+
+    return FunctionCallingAgent(
+        name="analyst",
+        tools=tools,
+        description=description,
+        system_prompt=dedent(prompt_instructions),
+        chat_history=chat_history,
+    )
diff --git a/templates/components/agents/python/financial_report/app/agents/reporter.py b/templates/components/agents/python/financial_report/app/agents/reporter.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1337bb033c792a4268573ad73be68fc4fce0cbc
--- /dev/null
+++ b/templates/components/agents/python/financial_report/app/agents/reporter.py
@@ -0,0 +1,44 @@
+from textwrap import dedent
+from typing import List, Tuple
+
+from app.engine.tools import ToolFactory
+from app.workflows.single import FunctionCallingAgent
+from llama_index.core.chat_engine.types import ChatMessage
+from llama_index.core.tools import BaseTool
+
+
+def _get_reporter_params(
+    chat_history: List[ChatMessage],
+) -> Tuple[List[type[BaseTool]], str, str]:
+    tools: List[type[BaseTool]] = []
+    description = "Expert in representing a financial report"
+    prompt_instructions = dedent(
+        """
+        You are a report generation assistant tasked with producing a well-formatted report given parsed context.
+        Given a comprehensive analysis of the user request, your task is to synthesize the information and return a well-formatted report.
+
+        ## Instructions
+        You are responsible for presenting the analysis in a well-formatted report. If tables or visualizations are provided, add them to the most relevant sections.
+        Use only the provided information to create the report. Do not make up any information yourself.
+        Finally, the report should be presented in markdown format.
+        """
+    )
+    configured_tools = ToolFactory.from_env(map_result=True)
+    if "document_generator" in configured_tools:  # type: ignore
+        tools.extend(configured_tools["document_generator"])  # type: ignore
+        prompt_instructions += (
+            "\nYou are also able to generate a file document (PDF/HTML) of the report."
+        )
+        description += " and generate a file document (PDF/HTML) of the report."
+    return tools, description, prompt_instructions
+
+
+def create_reporter(chat_history: List[ChatMessage]):
+    tools, description, prompt_instructions = _get_reporter_params(chat_history)
+    return FunctionCallingAgent(
+        name="reporter",
+        tools=tools,
+        description=description,
+        system_prompt=prompt_instructions,
+        chat_history=chat_history,
+    )
diff --git a/templates/components/agents/python/financial_report/app/agents/researcher.py b/templates/components/agents/python/financial_report/app/agents/researcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d1459a5f790b7329c43e43a3c4dda8a35179f08
--- /dev/null
+++ b/templates/components/agents/python/financial_report/app/agents/researcher.py
@@ -0,0 +1,105 @@
+import os
+from textwrap import dedent
+from typing import List, Optional
+
+from app.engine.index import IndexConfig, get_index
+from app.workflows.single import FunctionCallingAgent
+from llama_index.core.chat_engine.types import ChatMessage
+from llama_index.core.tools import BaseTool, QueryEngineTool, ToolMetadata
+from llama_index.indices.managed.llama_cloud import LlamaCloudIndex
+
+
+def _create_query_engine_tools(params=None) -> Optional[list[type[BaseTool]]]:
+    """
+    Provide an agent worker that can be used to query the index.
+    """
+    # Add query tool if index exists
+    index_config = IndexConfig(**(params or {}))
+    index = get_index(index_config)
+    if index is None:
+        return None
+
+    top_k = int(os.getenv("TOP_K", 5))
+
+    # Construct query engine tools
+    tools = []
+    # If index is LlamaCloudIndex, we need to add chunk and doc retriever tools
+    if isinstance(index, LlamaCloudIndex):
+        # Document retriever
+        doc_retriever = index.as_query_engine(
+            retriever_mode="files_via_content",
+            similarity_top_k=top_k,
+        )
+        chunk_retriever = index.as_query_engine(
+            retriever_mode="chunks",
+            similarity_top_k=top_k,
+        )
+        tools.append(
+            QueryEngineTool(
+                query_engine=doc_retriever,
+                metadata=ToolMetadata(
+                    name="document_retriever",
+                    description=dedent(
+                        """
+                        Document retriever that retrieves entire documents from the corpus.
+                        ONLY use for research questions that may require searching over entire research reports.
+                        Will be slower and more expensive than chunk-level retrieval but may be necessary.
+                        """
+                    ),
+                ),
+            )
+        )
+        tools.append(
+            QueryEngineTool(
+                query_engine=chunk_retriever,
+                metadata=ToolMetadata(
+                    name="chunk_retriever",
+                    description=dedent(
+                        """
+                        Retrieves a small set of relevant document chunks from the corpus.
+                        Use for research questions that want to look up specific facts from the knowledge corpus,
+                        and need entire documents.
+                        """
+                    ),
+                ),
+            )
+        )
+    else:
+        query_engine = index.as_query_engine(
+            **({"similarity_top_k": top_k} if top_k != 0 else {})
+        )
+        tools.append(
+            QueryEngineTool(
+                query_engine=query_engine,
+                metadata=ToolMetadata(
+                    name="retrieve_information",
+                    description="Use this tool to retrieve information about the text corpus from the index.",
+                ),
+            )
+        )
+    return tools
+
+
+def create_researcher(chat_history: List[ChatMessage], **kwargs):
+    """
+    Researcher is an agent that takes responsibility for using tools to complete a given task.
+    """
+    tools = _create_query_engine_tools(**kwargs)
+
+    if tools is None:
+        raise ValueError("No tools found for researcher agent")
+
+    return FunctionCallingAgent(
+        name="researcher",
+        tools=tools,
+        description="expert in retrieving any unknown content from the corpus",
+        system_prompt=dedent(
+            """
+            You are a researcher agent. You are responsible for retrieving information from the corpus.
+            ## Instructions
+            + Don't synthesize the information; just return the retrieved information as a whole.
+            + If the requested information is already provided in the chat history, don't retrieve it again; respond with: "There is no new information, please reuse the information from the conversation."
+            """
+        ),
+        chat_history=chat_history,
+    )
diff --git a/templates/components/agents/python/financial_report/app/agents/workflow.py b/templates/components/agents/python/financial_report/app/agents/workflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..d995bab2eae5310fd2eb99f764968912c32c88be
--- /dev/null
+++ b/templates/components/agents/python/financial_report/app/agents/workflow.py
@@ -0,0 +1,175 @@
+from textwrap import dedent
+from typing import AsyncGenerator, List, Optional
+
+from app.agents.analyst import create_analyst
+from app.agents.reporter import create_reporter
+from app.agents.researcher import create_researcher
+from app.workflows.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
+from llama_index.core.chat_engine.types import ChatMessage
+from llama_index.core.prompts import PromptTemplate
+from llama_index.core.settings import Settings
+from llama_index.core.workflow import (
+    Context,
+    Event,
+    StartEvent,
+    StopEvent,
+    Workflow,
+    step,
+)
+
+
+def create_workflow(chat_history: Optional[List[ChatMessage]] = None, **kwargs):
+    researcher = create_researcher(
+        chat_history=chat_history,
+        **kwargs,
+    )
+
+    analyst = create_analyst(chat_history=chat_history)
+
+    reporter = create_reporter(chat_history=chat_history)
+
+    workflow = FinancialReportWorkflow(timeout=360, chat_history=chat_history)
+
+    workflow.add_workflows(
+        researcher=researcher,
+        analyst=analyst,
+        reporter=reporter,
+    )
+    return workflow
+
+
+class ResearchEvent(Event):
+    input: str
+
+
+class AnalyzeEvent(Event):
+    input: str
+
+
+class ReportEvent(Event):
+    input: str
+
+
+class FinancialReportWorkflow(Workflow):
+    def __init__(
+        self, timeout: int = 360, chat_history: Optional[List[ChatMessage]] = None
+    ):
+        super().__init__(timeout=timeout)
+        self.chat_history = chat_history or []
+
+    @step()
+    async def start(self, ctx: Context, ev: StartEvent) -> ResearchEvent | ReportEvent:
+        # set streaming
+        ctx.data["streaming"] = getattr(ev, "streaming", False)
+        # start the workflow by researching the given task
+        ctx.data["task"] = ev.input
+        ctx.data["user_input"] = ev.input
+
+        # Decision-making process
+        decision = await self._decide_workflow(ev.input, self.chat_history)
+
+        if decision != "publish":
+            return ResearchEvent(input=f"Research for this task: {ev.input}")
+        else:
+            chat_history_str = "\n".join(
+                [f"{msg.role}: {msg.content}" for msg in self.chat_history]
+            )
+            return ReportEvent(
+                input=f"Create a report based on the chat history\n{chat_history_str}\n\n and task: {ev.input}"
+            )
+
+    async def _decide_workflow(
+        self, input: str, chat_history: List[ChatMessage]
+    ) -> str:
+        # TODO: Refactor this by using prompt generation
+        prompt_template = PromptTemplate(
+            dedent(
+                """
+                You are an expert in decision-making, helping people create financial reports for the provided data.
+                If the user doesn't need to add or update anything, respond with 'publish'.
+                Otherwise, respond with 'research'.
+
+                Here is the chat history:
+                {chat_history}
+
+                The current user request is:
+                {input}
+
+                Given the chat history and the new user request, decide whether to create a report based on existing information.
+                Decision (respond with either 'research' or 'publish'):
+            """
+            )
+        )
+
+        chat_history_str = "\n".join(
+            [f"{msg.role}: {msg.content}" for msg in chat_history]
+        )
+        prompt = prompt_template.format(chat_history=chat_history_str, input=input)
+
+        output = await Settings.llm.acomplete(prompt)
+        decision = output.text.strip().lower()
+
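+        # Anything other than an explicit "publish" falls back to the research path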
+        return "publish" if decision == "publish" else "research"
+
+    @step()
+    async def research(
+        self, ctx: Context, ev: ResearchEvent, researcher: FunctionCallingAgent
+    ) -> AnalyzeEvent:
+        result: AgentRunResult = await self.run_agent(ctx, researcher, ev.input)
+        content = result.response.message.content
+        return AnalyzeEvent(
+            input=dedent(
+                f"""
+                Given the following research content:
+                {content}
+                Provide a comprehensive analysis of the data for the user's request: {ctx.data["task"]}
+                """
+            )
+        )
+
+    @step()
+    async def analyze(
+        self, ctx: Context, ev: AnalyzeEvent, analyst: FunctionCallingAgent
+    ) -> ReportEvent | StopEvent:
+        result: AgentRunResult = await self.run_agent(ctx, analyst, ev.input)
+        content = result.response.message.content
+        return ReportEvent(
+            input=dedent(
+                f"""
+                Given the following analysis:
+                {content}
+                Create a report for the user's request: {ctx.data["task"]}
+                """
+            )
+        )
+
+    @step()
+    async def report(
+        self, ctx: Context, ev: ReportEvent, reporter: FunctionCallingAgent
+    ) -> StopEvent:
+        try:
+            result: AgentRunResult = await self.run_agent(ctx, reporter, ev.input)
+            return StopEvent(result=result)
+        except Exception as e:
+            ctx.write_event_to_stream(
+                AgentRunEvent(
+                    name=reporter.name,
+                    msg=f"Error creating a report: {e}",
+                )
+            )
+            return StopEvent(result=None)
+
+    async def run_agent(
+        self,
+        ctx: Context,
+        agent: FunctionCallingAgent,
+        input: str,
+        streaming: bool = False,
+    ) -> AgentRunResult | AsyncGenerator:
+        handler = agent.run(input=input, streaming=streaming)
+        # bubble all events from the agent run up to the workflow stream
+        async for event in handler.stream_events():
+            # don't write the StopEvent from the sub-task to the stream
+            if type(event) is not StopEvent:
+                ctx.write_event_to_stream(event)
+        return await handler
diff --git a/templates/components/agents/python/financial_report/app/engine/engine.py b/templates/components/agents/python/financial_report/app/engine/engine.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ea21ab29ae6a8581ede7b80d392e17a9bab2d87
--- /dev/null
+++ b/templates/components/agents/python/financial_report/app/engine/engine.py
@@ -0,0 +1,12 @@
+from typing import List, Optional
+
+from app.agents.workflow import create_workflow
+from llama_index.core.chat_engine.types import ChatMessage
+from llama_index.core.workflow import Workflow
+
+
+def get_chat_engine(
+    chat_history: Optional[List[ChatMessage]] = None, **kwargs
+) -> Workflow:
+    agent_workflow = create_workflow(chat_history, **kwargs)
+    return agent_workflow
diff --git a/templates/components/multiagent/python/app/api/routers/vercel_response.py b/templates/components/multiagent/python/app/api/routers/vercel_response.py
index 82f2e7056aba5bd63022cb129425d60d9656509f..2c23b6a758bb0ced0b6f308bd83cb9b0c497c310 100644
--- a/templates/components/multiagent/python/app/api/routers/vercel_response.py
+++ b/templates/components/multiagent/python/app/api/routers/vercel_response.py
@@ -4,9 +4,9 @@ import logging
 from typing import AsyncGenerator, List
 
 from aiostream import stream
-from app.agents.single import AgentRunEvent, AgentRunResult
 from app.api.routers.models import ChatData, Message
 from app.api.services.suggestion import NextQuestionSuggestion
+from app.workflows.single import AgentRunEvent, AgentRunResult
 from fastapi import Request
 from fastapi.responses import StreamingResponse
 
@@ -28,7 +28,6 @@ class VercelStreamResponse(StreamingResponse):
         super().__init__(content=content)
 
     async def content_generator(self, event_handler, events):
-        logger.info("Starting content_generator")
         stream = self._create_stream(
             self.request, self.chat_data, event_handler, events
         )
diff --git a/templates/components/multiagent/python/app/agents/multi.py b/templates/components/multiagent/python/app/workflows/multi.py
similarity index 96%
rename from templates/components/multiagent/python/app/agents/multi.py
rename to templates/components/multiagent/python/app/workflows/multi.py
index 1150385020d0f53f224474f403c3f837ca4ff36b..5ec62d0db75232ccaa8ea329939fb72304a744f8 100644
--- a/templates/components/multiagent/python/app/agents/multi.py
+++ b/templates/components/multiagent/python/app/workflows/multi.py
@@ -1,7 +1,7 @@
 from typing import Any, List
 
-from app.agents.planner import StructuredPlannerAgent
-from app.agents.single import (
+from app.workflows.planner import StructuredPlannerAgent
+from app.workflows.single import (
     AgentRunResult,
     ContextAwareTool,
     FunctionCallingAgent,
diff --git a/templates/components/multiagent/python/app/agents/planner.py b/templates/components/multiagent/python/app/workflows/planner.py
similarity index 99%
rename from templates/components/multiagent/python/app/agents/planner.py
rename to templates/components/multiagent/python/app/workflows/planner.py
index 8bb4fd721eec549639546f3c27e99051ef484927..339bb739b5cbe638934f2052f6c512982711d58f 100644
--- a/templates/components/multiagent/python/app/agents/planner.py
+++ b/templates/components/multiagent/python/app/workflows/planner.py
@@ -2,7 +2,7 @@ import uuid
 from enum import Enum
 from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, Union
 
-from app.agents.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
+from app.workflows.single import AgentRunEvent, AgentRunResult, FunctionCallingAgent
 from llama_index.core.agent.runner.planner import (
     DEFAULT_INITIAL_PLAN_PROMPT,
     DEFAULT_PLAN_REFINE_PROMPT,
diff --git a/templates/components/multiagent/python/app/agents/single.py b/templates/components/multiagent/python/app/workflows/single.py
similarity index 100%
rename from templates/components/multiagent/python/app/agents/single.py
rename to templates/components/multiagent/python/app/workflows/single.py