diff --git a/.changeset/metal-cherries-sin.md b/.changeset/metal-cherries-sin.md
new file mode 100644
index 0000000000000000000000000000000000000000..a9d5b36564a9f0320ac9dc98c8279f8139dc69a4
--- /dev/null
+++ b/.changeset/metal-cherries-sin.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Optimize TypeScript multi-agent code
diff --git a/.changeset/serious-suits-turn.md b/.changeset/serious-suits-turn.md
new file mode 100644
index 0000000000000000000000000000000000000000..4b26ed3364a6ada70eb049b2e521072f35bc5439
--- /dev/null
+++ b/.changeset/serious-suits-turn.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Add form filling use case (TypeScript)
diff --git a/e2e/shared/multiagent_template.spec.ts b/e2e/shared/multiagent_template.spec.ts
index 955e73453eac909fb2c296a240bd9c95bdb50d3c..52721cf5c874ae49741adaed94f4ca0ee7dec1ff 100644
--- a/e2e/shared/multiagent_template.spec.ts
+++ b/e2e/shared/multiagent_template.spec.ts
@@ -26,10 +26,6 @@ for (const agents of templateAgents) {
       process.platform !== "linux" || process.env.DATASOURCE === "--no-files",
       "The multiagent template currently only works with files. We also only run on Linux to speed up tests.",
     );
-    test.skip(
-      agents === "form_filling" && templateFramework !== "fastapi",
-      "Form filling is currently only supported with FastAPI.",
-    );
     let port: number;
     let externalPort: number;
     let cwd: string;
diff --git a/helpers/typescript.ts b/helpers/typescript.ts
index da6bd62cb415ac745a7fdbc242a9aa5e2aba219e..b443286f2806b11eed6d48e4ba150142418112ff 100644
--- a/helpers/typescript.ts
+++ b/helpers/typescript.ts
@@ -136,19 +136,22 @@ export const installTSTemplate = async ({
     // Copy agents use case code for multiagent template
     if (agents) {
       console.log("\nCopying agent:", agents, "\n");
+      const useCasePath = path.join(compPath, "agents", "typescript", agents);
+      const agentsCodePath = path.join(useCasePath, "workflow");
 
-      const agentsCodePath = path.join(
-        compPath,
-        "agents",
-        "typescript",
-        agents,
-      );
-
+      // Copy agent workflow code
       await copy("**", path.join(root, relativeEngineDestPath, "workflow"), {
         parents: true,
         cwd: agentsCodePath,
         rename: assetRelocator,
       });
+
+      // Copy use case files to project root
+      await copy("*.*", path.join(root), {
+        parents: true,
+        cwd: useCasePath,
+        rename: assetRelocator,
+      });
     } else {
       console.log(
         red(
diff --git a/questions/simple.ts b/questions/simple.ts
index 50ae904d759798984991531d3301784a6860eea7..16052b39ce5db3dcb3e2922d465d04c57f4cfc83 100644
--- a/questions/simple.ts
+++ b/questions/simple.ts
@@ -52,22 +52,19 @@ export const askSimpleQuestions = async (
   let useLlamaCloud = false;
 
   if (appType !== "extractor") {
-    // TODO: Add TS support for form filling use case
-    if (appType !== "form_filling") {
-      const { language: newLanguage } = await prompts(
-        {
-          type: "select",
-          name: "language",
-          message: "What language do you want to use?",
-          choices: [
-            { title: "Python (FastAPI)", value: "fastapi" },
-            { title: "Typescript (NextJS)", value: "nextjs" },
-          ],
-        },
-        questionHandlers,
-      );
-      language = newLanguage;
-    }
+    const { language: newLanguage } = await prompts(
+      {
+        type: "select",
+        name: "language",
+        message: "What language do you want to use?",
+        choices: [
+          { title: "Python (FastAPI)", value: "fastapi" },
+          { title: "Typescript (NextJS)", value: "nextjs" },
+        ],
+      },
+      questionHandlers,
+    );
+    language = newLanguage;
 
     const { useLlamaCloud: newUseLlamaCloud } = await prompts(
       {
diff --git a/templates/components/agents/typescript/blog/factory.ts b/templates/components/agents/typescript/blog/factory.ts
deleted file mode 100644
index 0e341ca218983a44adbe8359ca3b94299c3189ba..0000000000000000000000000000000000000000
--- a/templates/components/agents/typescript/blog/factory.ts
+++ /dev/null
@@ -1,230 +0,0 @@
-import {
-  Context,
-  StartEvent,
-  StopEvent,
-  Workflow,
-  WorkflowEvent,
-} from "@llamaindex/core/workflow";
-import { Message } from "ai";
-import { ChatMessage, ChatResponseChunk, Settings } from "llamaindex";
-import { getAnnotations } from "../llamaindex/streaming/annotations";
-import {
-  createPublisher,
-  createResearcher,
-  createReviewer,
-  createWriter,
-} from "./agents";
-import { AgentInput, AgentRunEvent } from "./type";
-
-const TIMEOUT = 360 * 1000;
-const MAX_ATTEMPTS = 2;
-
-class ResearchEvent extends WorkflowEvent<{ input: string }> {}
-class WriteEvent extends WorkflowEvent<{
-  input: string;
-  isGood: boolean;
-}> {}
-class ReviewEvent extends WorkflowEvent<{ input: string }> {}
-class PublishEvent extends WorkflowEvent<{ input: string }> {}
-
-const prepareChatHistory = (chatHistory: Message[]): ChatMessage[] => {
-  // By default, the chat history only contains the assistant and user messages
-  // all the agents messages are stored in annotation data which is not visible to the LLM
-
-  const MAX_AGENT_MESSAGES = 10;
-  const agentAnnotations = getAnnotations<{ agent: string; text: string }>(
-    chatHistory,
-    { role: "assistant", type: "agent" },
-  ).slice(-MAX_AGENT_MESSAGES);
-
-  const agentMessages = agentAnnotations
-    .map(
-      (annotation) =>
-        `\n<${annotation.data.agent}>\n${annotation.data.text}\n</${annotation.data.agent}>`,
-    )
-    .join("\n");
-
-  const agentContent = agentMessages
-    ? "Here is the previous conversation of agents:\n" + agentMessages
-    : "";
-
-  if (agentContent) {
-    const agentMessage: ChatMessage = {
-      role: "assistant",
-      content: agentContent,
-    };
-    return [
-      ...chatHistory.slice(0, -1),
-      agentMessage,
-      chatHistory.slice(-1)[0],
-    ] as ChatMessage[];
-  }
-  return chatHistory as ChatMessage[];
-};
-
-export const createWorkflow = (messages: Message[], params?: any) => {
-  const chatHistoryWithAgentMessages = prepareChatHistory(messages);
-  const runAgent = async (
-    context: Context,
-    agent: Workflow,
-    input: AgentInput,
-  ) => {
-    const run = agent.run(new StartEvent({ input }));
-    for await (const event of agent.streamEvents()) {
-      if (event.data instanceof AgentRunEvent) {
-        context.writeEventToStream(event.data);
-      }
-    }
-    return await run;
-  };
-
-  const start = async (context: Context, ev: StartEvent) => {
-    context.set("task", ev.data.input);
-
-    const chatHistoryStr = chatHistoryWithAgentMessages
-      .map((msg) => `${msg.role}: ${msg.content}`)
-      .join("\n");
-
-    // Decision-making process
-    const decision = await decideWorkflow(ev.data.input, chatHistoryStr);
-
-    if (decision !== "publish") {
-      return new ResearchEvent({
-        input: `Research for this task: ${ev.data.input}`,
-      });
-    } else {
-      return new PublishEvent({
-        input: `Publish content based on the chat history\n${chatHistoryStr}\n\n and task: ${ev.data.input}`,
-      });
-    }
-  };
-
-  const decideWorkflow = async (task: string, chatHistoryStr: string) => {
-    const llm = Settings.llm;
-
-    const prompt = `You are an expert in decision-making, helping people write and publish blog posts.
-If the user is asking for a file or to publish content, respond with 'publish'.
-If the user requests to write or update a blog post, respond with 'not_publish'.
-
-Here is the chat history:
-${chatHistoryStr}
-
-The current user request is:
-${task}
-
-Given the chat history and the new user request, decide whether to publish based on existing information.
-Decision (respond with either 'not_publish' or 'publish'):`;
-
-    const output = await llm.complete({ prompt: prompt });
-    const decision = output.text.trim().toLowerCase();
-    return decision === "publish" ? "publish" : "research";
-  };
-
-  const research = async (context: Context, ev: ResearchEvent) => {
-    const researcher = await createResearcher(
-      chatHistoryWithAgentMessages,
-      params,
-    );
-    const researchRes = await runAgent(context, researcher, {
-      message: ev.data.input,
-    });
-    const researchResult = researchRes.data.result;
-    return new WriteEvent({
-      input: `Write a blog post given this task: ${context.get("task")} using this research content: ${researchResult}`,
-      isGood: false,
-    });
-  };
-
-  const write = async (context: Context, ev: WriteEvent) => {
-    const writer = createWriter(chatHistoryWithAgentMessages);
-
-    context.set("attempts", context.get("attempts", 0) + 1);
-    const tooManyAttempts = context.get("attempts") > MAX_ATTEMPTS;
-    if (tooManyAttempts) {
-      context.writeEventToStream(
-        new AgentRunEvent({
-          name: "writer",
-          msg: `Too many attempts (${MAX_ATTEMPTS}) to write the blog post. Proceeding with the current version.`,
-        }),
-      );
-    }
-
-    if (ev.data.isGood || tooManyAttempts) {
-      // the blog post is good or too many attempts
-      // stream the final content
-      const result = await runAgent(context, writer, {
-        message: `Based on the reviewer's feedback, refine the post and return only the final version of the post. Here's the current version: ${ev.data.input}`,
-        streaming: true,
-      });
-      return result as unknown as StopEvent<AsyncGenerator<ChatResponseChunk>>;
-    }
-
-    const writeRes = await runAgent(context, writer, {
-      message: ev.data.input,
-    });
-    const writeResult = writeRes.data.result;
-    context.set("result", writeResult); // store the last result
-    return new ReviewEvent({ input: writeResult });
-  };
-
-  const review = async (context: Context, ev: ReviewEvent) => {
-    const reviewer = createReviewer(chatHistoryWithAgentMessages);
-    const reviewRes = await reviewer.run(
-      new StartEvent<AgentInput>({ input: { message: ev.data.input } }),
-    );
-    const reviewResult = reviewRes.data.result;
-    const oldContent = context.get("result");
-    const postIsGood = reviewResult.toLowerCase().includes("post is good");
-    context.writeEventToStream(
-      new AgentRunEvent({
-        name: "reviewer",
-        msg: `The post is ${postIsGood ? "" : "not "}good enough for publishing. Sending back to the writer${
-          postIsGood ? " for publication." : "."
-        }`,
-      }),
-    );
-    if (postIsGood) {
-      return new WriteEvent({
-        input: "",
-        isGood: true,
-      });
-    }
-
-    return new WriteEvent({
-      input: `Improve the writing of a given blog post by using a given review.
-            Blog post:
-            \`\`\`
-            ${oldContent}
-            \`\`\`
-
-            Review:
-            \`\`\`
-            ${reviewResult}
-            \`\`\``,
-      isGood: false,
-    });
-  };
-
-  const publish = async (context: Context, ev: PublishEvent) => {
-    const publisher = await createPublisher(chatHistoryWithAgentMessages);
-
-    const publishResult = await runAgent(context, publisher, {
-      message: `${ev.data.input}`,
-      streaming: true,
-    });
-    return publishResult as unknown as StopEvent<
-      AsyncGenerator<ChatResponseChunk>
-    >;
-  };
-
-  const workflow = new Workflow({ timeout: TIMEOUT, validate: true });
-  workflow.addStep(StartEvent, start, {
-    outputs: [ResearchEvent, PublishEvent],
-  });
-  workflow.addStep(ResearchEvent, research, { outputs: WriteEvent });
-  workflow.addStep(WriteEvent, write, { outputs: [ReviewEvent, StopEvent] });
-  workflow.addStep(ReviewEvent, review, { outputs: WriteEvent });
-  workflow.addStep(PublishEvent, publish, { outputs: StopEvent });
-
-  return workflow;
-};
diff --git a/templates/components/agents/typescript/blog/tools.ts b/templates/components/agents/typescript/blog/tools.ts
deleted file mode 100644
index 012da6aec318bd4e7137368cf085ba2cb7d72856..0000000000000000000000000000000000000000
--- a/templates/components/agents/typescript/blog/tools.ts
+++ /dev/null
@@ -1,54 +0,0 @@
-import fs from "fs/promises";
-import { BaseToolWithCall, QueryEngineTool } from "llamaindex";
-import path from "path";
-import { getDataSource } from "../engine";
-import { createTools } from "../engine/tools/index";
-
-export const getQueryEngineTool = async (
-  params?: any,
-): Promise<QueryEngineTool | null> => {
-  const index = await getDataSource(params);
-  if (!index) {
-    return null;
-  }
-
-  const topK = process.env.TOP_K ? parseInt(process.env.TOP_K) : undefined;
-  return new QueryEngineTool({
-    queryEngine: index.asQueryEngine({
-      similarityTopK: topK,
-    }),
-    metadata: {
-      name: "query_index",
-      description: `Use this tool to retrieve information about the text corpus from the index.`,
-    },
-  });
-};
-
-export const getAvailableTools = async () => {
-  const configFile = path.join("config", "tools.json");
-  let toolConfig: any;
-  const tools: BaseToolWithCall[] = [];
-  try {
-    toolConfig = JSON.parse(await fs.readFile(configFile, "utf8"));
-  } catch (e) {
-    console.info(`Could not read ${configFile} file. Using no tools.`);
-  }
-  if (toolConfig) {
-    tools.push(...(await createTools(toolConfig)));
-  }
-  const queryEngineTool = await getQueryEngineTool();
-  if (queryEngineTool) {
-    tools.push(queryEngineTool);
-  }
-
-  return tools;
-};
-
-export const lookupTools = async (
-  toolNames: string[],
-): Promise<BaseToolWithCall[]> => {
-  const availableTools = await getAvailableTools();
-  return availableTools.filter((tool) =>
-    toolNames.includes(tool.metadata.name),
-  );
-};
diff --git a/templates/components/agents/typescript/blog/agents.ts b/templates/components/agents/typescript/blog/workflow/agents.ts
similarity index 90%
rename from templates/components/agents/typescript/blog/agents.ts
rename to templates/components/agents/typescript/blog/workflow/agents.ts
index 71f3123c7720bc8b2bb01f630512e7fb78adbe77..2c9930ef750c92a90208e0fef839a97915b179df 100644
--- a/templates/components/agents/typescript/blog/agents.ts
+++ b/templates/components/agents/typescript/blog/workflow/agents.ts
@@ -1,19 +1,16 @@
 import { ChatMessage } from "llamaindex";
+import { getTool } from "../engine/tools";
 import { FunctionCallingAgent } from "./single-agent";
-import { getQueryEngineTool, lookupTools } from "./tools";
+import { getQueryEngineTools } from "./tools";
 
-export const createResearcher = async (
-  chatHistory: ChatMessage[],
-  params?: any,
-) => {
-  const queryEngineTool = await getQueryEngineTool(params);
-  const tools = (
-    await lookupTools([
-      "wikipedia_tool",
-      "duckduckgo_search",
-      "image_generator",
-    ])
-  ).concat(queryEngineTool ? [queryEngineTool] : []);
+export const createResearcher = async (chatHistory: ChatMessage[]) => {
+  const queryEngineTools = await getQueryEngineTools();
+  const tools = [
+    await getTool("wikipedia_tool"),
+    await getTool("duckduckgo_search"),
+    await getTool("image_generator"),
+    ...(queryEngineTools ? queryEngineTools : []),
+  ].filter((tool) => tool !== undefined);
 
   return new FunctionCallingAgent({
     name: "researcher",
@@ -81,17 +78,17 @@ Example:
 };
 
 export const createPublisher = async (chatHistory: ChatMessage[]) => {
-  const tools = await lookupTools(["document_generator"]);
+  const tool = await getTool("document_generator");
   let systemPrompt = `You are an expert in publishing blog posts. You are given a task to publish a blog post. 
 If the writer says that there was an error, you should reply with the error and not publish the post.`;
-  if (tools.length > 0) {
+  if (tool) {
     systemPrompt = `${systemPrompt}. 
 If the user requests to generate a file, use the document_generator tool to generate the file and reply with the link to the file.
 Otherwise, simply return the content of the post.`;
   }
   return new FunctionCallingAgent({
     name: "publisher",
-    tools: tools,
+    tools: tool ? [tool] : [],
     systemPrompt: systemPrompt,
     chatHistory,
   });
diff --git a/templates/components/agents/typescript/blog/workflow/factory.ts b/templates/components/agents/typescript/blog/workflow/factory.ts
new file mode 100644
index 0000000000000000000000000000000000000000..dea054e8d355d11db51ae16d0d5d2548f1c9149d
--- /dev/null
+++ b/templates/components/agents/typescript/blog/workflow/factory.ts
@@ -0,0 +1,291 @@
+import {
+  HandlerContext,
+  StartEvent,
+  StopEvent,
+  Workflow,
+  WorkflowContext,
+  WorkflowEvent,
+} from "@llamaindex/workflow";
+import { ChatMessage, ChatResponseChunk, Settings } from "llamaindex";
+import {
+  createPublisher,
+  createResearcher,
+  createReviewer,
+  createWriter,
+} from "./agents";
+import {
+  FunctionCallingAgent,
+  FunctionCallingAgentInput,
+} from "./single-agent";
+import { AgentInput, AgentRunEvent } from "./type";
+
+const TIMEOUT = 360 * 1000;
+const MAX_ATTEMPTS = 2;
+
+class ResearchEvent extends WorkflowEvent<{ input: string }> {}
+class WriteEvent extends WorkflowEvent<{
+  input: string;
+  isGood: boolean;
+}> {}
+class ReviewEvent extends WorkflowEvent<{ input: string }> {}
+class PublishEvent extends WorkflowEvent<{ input: string }> {}
+
+type BlogContext = {
+  task: string;
+  attempts: number;
+  result: string;
+};
+
+export const createWorkflow = ({
+  chatHistory,
+  params,
+}: {
+  chatHistory: ChatMessage[];
+  params?: any;
+}) => {
+  const runAgent = async (
+    context: HandlerContext<BlogContext>,
+    agent: FunctionCallingAgent,
+    input: FunctionCallingAgentInput,
+  ) => {
+    const agentContext = agent.run(input, {
+      streaming: input.streaming ?? false,
+    });
+    for await (const event of agentContext) {
+      if (event instanceof AgentRunEvent) {
+        context.sendEvent(event);
+      }
+      if (event instanceof StopEvent) {
+        return event;
+      }
+    }
+    return null;
+  };
+
+  const start = async (
+    context: HandlerContext<BlogContext>,
+    ev: StartEvent<AgentInput>,
+  ) => {
+    const chatHistoryStr = chatHistory
+      .map((msg) => `${msg.role}: ${msg.content}`)
+      .join("\n");
+
+    // Decision-making process
+    const decision = await decideWorkflow(
+      ev.data.message.toString(),
+      chatHistoryStr,
+    );
+
+    if (decision !== "publish") {
+      return new ResearchEvent({
+        input: `Research for this task: ${JSON.stringify(context.data.task)}`,
+      });
+    } else {
+      return new PublishEvent({
+        input: `Publish content based on the chat history\n${chatHistoryStr}\n\n and task: ${context.data.task}`,
+      });
+    }
+  };
+
+  const decideWorkflow = async (task: string, chatHistoryStr: string) => {
+    const llm = Settings.llm;
+
+    const prompt = `You are an expert in decision-making, helping people write and publish blog posts.
+If the user is asking for a file or to publish content, respond with 'publish'.
+If the user requests to write or update a blog post, respond with 'not_publish'.
+
+Here is the chat history:
+${chatHistoryStr}
+
+The current user request is:
+${task}
+
+Given the chat history and the new user request, decide whether to publish based on existing information.
+Decision (respond with either 'not_publish' or 'publish'):`;
+
+    const output = await llm.complete({ prompt: prompt });
+    const decision = output.text.trim().toLowerCase();
+    return decision === "publish" ? "publish" : "research";
+  };
+
+  const research = async (
+    context: HandlerContext<BlogContext>,
+    ev: ResearchEvent,
+  ) => {
+    const researcher = await createResearcher(chatHistory);
+    const researchRes = await runAgent(context, researcher, {
+      displayName: "Researcher",
+      message: ev.data.input,
+    });
+    const researchResult = researchRes?.data;
+
+    return new WriteEvent({
+      input: `Write a blog post given this task: ${JSON.stringify(
+        context.data.task,
+      )} using this research content: ${researchResult}`,
+      isGood: false,
+    });
+  };
+
+  const write = async (
+    context: HandlerContext<BlogContext>,
+    ev: WriteEvent,
+  ) => {
+    const writer = createWriter(chatHistory);
+    context.data.attempts = context.data.attempts + 1;
+    const tooManyAttempts = context.data.attempts > MAX_ATTEMPTS;
+    if (tooManyAttempts) {
+      context.sendEvent(
+        new AgentRunEvent({
+          agent: "writer",
+          text: `Too many attempts (${MAX_ATTEMPTS}) to write the blog post. Proceeding with the current version.`,
+          type: "text",
+        }),
+      );
+    }
+
+    if (ev.data.isGood || tooManyAttempts) {
+      // the blog post is good or too many attempts
+      // stream the final content
+      const result = await runAgent(context, writer, {
+        message: `Based on the reviewer's feedback, refine the post and return only the final version of the post. Here's the current version: ${ev.data.input}`,
+        displayName: "Writer",
+        streaming: true,
+      });
+      return result as unknown as StopEvent<AsyncGenerator<ChatResponseChunk>>;
+    }
+
+    const writeRes = await runAgent(context, writer, {
+      message: ev.data.input,
+      displayName: "Writer",
+      streaming: false,
+    });
+    const writeResult = writeRes?.data;
+    context.data.result = writeResult; // store the last result
+
+    return new ReviewEvent({ input: writeResult });
+  };
+
+  const review = async (
+    context: HandlerContext<BlogContext>,
+    ev: ReviewEvent,
+  ) => {
+    const reviewer = createReviewer(chatHistory);
+    const reviewResult = (await runAgent(context, reviewer, {
+      message: ev.data.input,
+      displayName: "Reviewer",
+      streaming: false,
+    })) as unknown as StopEvent<string>;
+    const reviewResultStr = reviewResult.data;
+    const oldContent = context.data.result;
+    const postIsGood = reviewResultStr.toLowerCase().includes("post is good");
+    context.sendEvent(
+      new AgentRunEvent({
+        agent: "reviewer",
+        text: `The post is ${postIsGood ? "" : "not "}good enough for publishing. Sending back to the writer${
+          postIsGood ? " for publication." : "."
+        }`,
+        type: "text",
+      }),
+    );
+    if (postIsGood) {
+      return new WriteEvent({
+        input: "",
+        isGood: true,
+      });
+    }
+
+    return new WriteEvent({
+      input: `Improve the writing of a given blog post by using a given review.
+            Blog post:
+            \`\`\`
+            ${oldContent}
+            \`\`\`
+
+            Review:
+            \`\`\`
+            ${reviewResultStr}
+            \`\`\``,
+      isGood: false,
+    });
+  };
+
+  const publish = async (
+    context: HandlerContext<BlogContext>,
+    ev: PublishEvent,
+  ) => {
+    const publisher = await createPublisher(chatHistory);
+
+    const publishResult = await runAgent(context, publisher, {
+      message: `${ev.data.input}`,
+      displayName: "Publisher",
+      streaming: true,
+    });
+    return publishResult as unknown as StopEvent<
+      AsyncGenerator<ChatResponseChunk>
+    >;
+  };
+
+  const workflow: Workflow<
+    BlogContext,
+    AgentInput,
+    string | AsyncGenerator<boolean | ChatResponseChunk>
+  > = new Workflow();
+
+  workflow.addStep(
+    {
+      inputs: [StartEvent<AgentInput>],
+      outputs: [ResearchEvent, PublishEvent],
+    },
+    start,
+  );
+
+  workflow.addStep(
+    {
+      inputs: [ResearchEvent],
+      outputs: [WriteEvent],
+    },
+    research,
+  );
+
+  workflow.addStep(
+    {
+      inputs: [WriteEvent],
+      outputs: [ReviewEvent, StopEvent<AsyncGenerator<ChatResponseChunk>>],
+    },
+    write,
+  );
+
+  workflow.addStep(
+    {
+      inputs: [ReviewEvent],
+      outputs: [WriteEvent],
+    },
+    review,
+  );
+
+  workflow.addStep(
+    {
+      inputs: [PublishEvent],
+      outputs: [StopEvent],
+    },
+    publish,
+  );
+
+  // Override the run method to initialize the context
+  workflow.run = function (
+    input: AgentInput,
+  ): WorkflowContext<
+    AgentInput,
+    string | AsyncGenerator<boolean | ChatResponseChunk>,
+    BlogContext
+  > {
+    return Workflow.prototype.run.call(workflow, new StartEvent(input), {
+      task: input.message.toString(),
+      attempts: 0,
+      result: "",
+    });
+  };
+
+  return workflow;
+};
diff --git a/templates/components/agents/typescript/financial_report/README-template.md b/templates/components/agents/typescript/financial_report/README-template.md
new file mode 100644
index 0000000000000000000000000000000000000000..b6a0d46cab724cfd856d3a65322244809ce18310
--- /dev/null
+++ b/templates/components/agents/typescript/financial_report/README-template.md
@@ -0,0 +1,53 @@
+This is a [LlamaIndex](https://www.llamaindex.ai/) project using [Next.js](https://nextjs.org/) bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).
+
+## Getting Started
+
+First, install the dependencies:
+
+```
+npm install
+```
+
+Then check the parameters that have been pre-configured in the `.env` file in this directory.
+Make sure you have the `OPENAI_API_KEY` set.
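+
+For example, make sure the `.env` file contains a line like the following (a sketch; replace the placeholder with your own key):
+
+```
+OPENAI_API_KEY=<your-openai-api-key>
+```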
+
+Second, generate the embeddings of the documents in the `./data` directory:
+
+```
+npm run generate
+```
+
+Third, run the development server:
+
+```
+npm run dev
+```
+
+Open [http://localhost:3000](http://localhost:3000) with your browser to see the chat UI.
+
+## Use Case: Generating a Financial Report
+
+You can start by sending a request in the chat UI to create a report comparing the finances of Apple and Tesla.
+Or you can test the `/api/chat` endpoint with the following curl request:
+
+```
+curl --location 'localhost:3000/api/chat' \
+--header 'Content-Type: application/json' \
+--data '{ "messages": [{ "role": "user", "content": "Create a report comparing the finances of Apple and Tesla" }] }'
+```
+
+## Learn More
+
+To learn more about LlamaIndex, take a look at the following resources:
+
+- [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex (Python features).
+- [LlamaIndexTS Documentation](https://ts.llamaindex.ai/docs/llamaindex) - learn about LlamaIndex (TypeScript features).
+- [Workflows Introduction](https://ts.llamaindex.ai/docs/llamaindex/guide/workflow) - learn about LlamaIndexTS workflows.
+
+You can check out [the LlamaIndexTS GitHub repository](https://github.com/run-llama/LlamaIndexTS) - your feedback and contributions are welcome!
diff --git a/templates/components/agents/typescript/financial_report/agents.ts b/templates/components/agents/typescript/financial_report/agents.ts
deleted file mode 100644
index ca86aa5ff72d21c986ca247ffc6b1cd428e50cc0..0000000000000000000000000000000000000000
--- a/templates/components/agents/typescript/financial_report/agents.ts
+++ /dev/null
@@ -1,65 +0,0 @@
-import { ChatMessage } from "llamaindex";
-import { FunctionCallingAgent } from "./single-agent";
-import { getQueryEngineTools, lookupTools } from "./tools";
-
-export const createResearcher = async (
-  chatHistory: ChatMessage[],
-  params?: any,
-) => {
-  const queryEngineTools = await getQueryEngineTools(params);
-
-  if (!queryEngineTools) {
-    throw new Error("Query engine tool not found");
-  }
-
-  return new FunctionCallingAgent({
-    name: "researcher",
-    tools: queryEngineTools,
-    systemPrompt: `You are a researcher agent. You are responsible for retrieving information from the corpus.
-## Instructions:
-+ Don't synthesize the information, just return the whole retrieved information.
-+ Don't need to retrieve the information that is already provided in the chat history and respond with: "There is no new information, please reuse the information from the conversation."
-`,
-    chatHistory,
-  });
-};
-
-export const createAnalyst = async (chatHistory: ChatMessage[]) => {
-  let systemPrompt = `You are an expert in analyzing financial data.
-You are given a task and a set of financial data to analyze. Your task is to analyze the financial data and return a report.
-Your response should include a detailed analysis of the financial data, including any trends, patterns, or insights that you find.
-Construct the analysis in textual format; including tables would be great!
-Don't need to synthesize the data, just analyze and provide your findings.
-Always use the provided information, don't make up any information yourself.`;
-  const tools = await lookupTools(["interpreter"]);
-  if (tools.length > 0) {
-    systemPrompt = `${systemPrompt}
-You are able to visualize the financial data using code interpreter tool.
-It's very useful to create and include visualizations in the report. Never include any code in the report, just the visualization.`;
-  }
-  return new FunctionCallingAgent({
-    name: "analyst",
-    tools: tools,
-    chatHistory,
-  });
-};
-
-export const createReporter = async (chatHistory: ChatMessage[]) => {
-  const tools = await lookupTools(["document_generator"]);
-  let systemPrompt = `You are a report generation assistant tasked with producing a well-formatted report given parsed context.
-Given a comprehensive analysis of the user request, your task is to synthesize the information and return a well-formatted report.
-
-## Instructions
-You are responsible for representing the analysis in a well-formatted report. If tables or visualizations are provided, add them to the most relevant sections.
-Finally, the report should be presented in markdown format.`;
-  if (tools.length > 0) {
-    systemPrompt = `${systemPrompt}. 
-You are also able to generate an HTML file of the report.`;
-  }
-  return new FunctionCallingAgent({
-    name: "reporter",
-    tools: tools,
-    systemPrompt: systemPrompt,
-    chatHistory,
-  });
-};
diff --git a/templates/components/agents/typescript/financial_report/factory.ts b/templates/components/agents/typescript/financial_report/factory.ts
deleted file mode 100644
index ea530969c7b74bebf8c605788193b7cf1a13eb05..0000000000000000000000000000000000000000
--- a/templates/components/agents/typescript/financial_report/factory.ts
+++ /dev/null
@@ -1,159 +0,0 @@
-import {
-  Context,
-  StartEvent,
-  StopEvent,
-  Workflow,
-  WorkflowEvent,
-} from "@llamaindex/core/workflow";
-import { Message } from "ai";
-import { ChatMessage, ChatResponseChunk, Settings } from "llamaindex";
-import { getAnnotations } from "../llamaindex/streaming/annotations";
-import { createAnalyst, createReporter, createResearcher } from "./agents";
-import { AgentInput, AgentRunEvent } from "./type";
-
-const TIMEOUT = 360 * 1000;
-const MAX_ATTEMPTS = 2;
-
-class ResearchEvent extends WorkflowEvent<{ input: string }> {}
-class AnalyzeEvent extends WorkflowEvent<{ input: string }> {}
-class ReportEvent extends WorkflowEvent<{ input: string }> {}
-
-const prepareChatHistory = (chatHistory: Message[]): ChatMessage[] => {
-  // By default, the chat history only contains the assistant and user messages
-  // all the agents messages are stored in annotation data which is not visible to the LLM
-
-  const MAX_AGENT_MESSAGES = 10;
-  const agentAnnotations = getAnnotations<{ agent: string; text: string }>(
-    chatHistory,
-    { role: "assistant", type: "agent" },
-  ).slice(-MAX_AGENT_MESSAGES);
-
-  const agentMessages = agentAnnotations
-    .map(
-      (annotation) =>
-        `\n<${annotation.data.agent}>\n${annotation.data.text}\n</${annotation.data.agent}>`,
-    )
-    .join("\n");
-
-  const agentContent = agentMessages
-    ? "Here is the previous conversation of agents:\n" + agentMessages
-    : "";
-
-  if (agentContent) {
-    const agentMessage: ChatMessage = {
-      role: "assistant",
-      content: agentContent,
-    };
-    return [
-      ...chatHistory.slice(0, -1),
-      agentMessage,
-      chatHistory.slice(-1)[0],
-    ] as ChatMessage[];
-  }
-  return chatHistory as ChatMessage[];
-};
-
-export const createWorkflow = (messages: Message[], params?: any) => {
-  const chatHistoryWithAgentMessages = prepareChatHistory(messages);
-  const runAgent = async (
-    context: Context,
-    agent: Workflow,
-    input: AgentInput,
-  ) => {
-    const run = agent.run(new StartEvent({ input }));
-    for await (const event of agent.streamEvents()) {
-      if (event.data instanceof AgentRunEvent) {
-        context.writeEventToStream(event.data);
-      }
-    }
-    return await run;
-  };
-
-  const start = async (context: Context, ev: StartEvent) => {
-    context.set("task", ev.data.input);
-
-    const chatHistoryStr = chatHistoryWithAgentMessages
-      .map((msg) => `${msg.role}: ${msg.content}`)
-      .join("\n");
-
-    // Decision-making process
-    const decision = await decideWorkflow(ev.data.input, chatHistoryStr);
-
-    if (decision !== "publish") {
-      return new ResearchEvent({
-        input: `Research for this task: ${ev.data.input}`,
-      });
-    } else {
-      return new ReportEvent({
-        input: `Publish content based on the chat history\n${chatHistoryStr}\n\n and task: ${ev.data.input}`,
-      });
-    }
-  };
-
-  const decideWorkflow = async (task: string, chatHistoryStr: string) => {
-    const llm = Settings.llm;
-
-    const prompt = `You are an expert in decision-making, helping people write and publish blog posts.
-If the user is asking for a file or to publish content, respond with 'publish'.
-If the user requests to write or update a blog post, respond with 'not_publish'.
-
-Here is the chat history:
-${chatHistoryStr}
-
-The current user request is:
-${task}
-
-Given the chat history and the new user request, decide whether to publish based on existing information.
-Decision (respond with either 'not_publish' or 'publish'):`;
-
-    const output = await llm.complete({ prompt: prompt });
-    const decision = output.text.trim().toLowerCase();
-    return decision === "publish" ? "publish" : "research";
-  };
-
-  const research = async (context: Context, ev: ResearchEvent) => {
-    const researcher = await createResearcher(
-      chatHistoryWithAgentMessages,
-      params,
-    );
-    const researchRes = await runAgent(context, researcher, {
-      message: ev.data.input,
-    });
-    const researchResult = researchRes.data.result;
-    return new AnalyzeEvent({
-      input: `Write a blog post given this task: ${context.get("task")} using this research content: ${researchResult}`,
-    });
-  };
-
-  const analyze = async (context: Context, ev: AnalyzeEvent) => {
-    const analyst = await createAnalyst(chatHistoryWithAgentMessages);
-    const analyzeRes = await runAgent(context, analyst, {
-      message: ev.data.input,
-    });
-    return new ReportEvent({
-      input: `Publish content based on the chat history\n${analyzeRes.data.result}\n\n and task: ${ev.data.input}`,
-    });
-  };
-
-  const report = async (context: Context, ev: ReportEvent) => {
-    const reporter = await createReporter(chatHistoryWithAgentMessages);
-
-    const reportResult = await runAgent(context, reporter, {
-      message: `${ev.data.input}`,
-      streaming: true,
-    });
-    return reportResult as unknown as StopEvent<
-      AsyncGenerator<ChatResponseChunk>
-    >;
-  };
-
-  const workflow = new Workflow({ timeout: TIMEOUT, validate: true });
-  workflow.addStep(StartEvent, start, {
-    outputs: [ResearchEvent, ReportEvent],
-  });
-  workflow.addStep(ResearchEvent, research, { outputs: AnalyzeEvent });
-  workflow.addStep(AnalyzeEvent, analyze, { outputs: ReportEvent });
-  workflow.addStep(ReportEvent, report, { outputs: StopEvent });
-
-  return workflow;
-};
diff --git a/templates/components/agents/typescript/financial_report/tools.ts b/templates/components/agents/typescript/financial_report/tools.ts
deleted file mode 100644
index 3d979a77af4f5f02aa57b9936ad5e000ffa252fa..0000000000000000000000000000000000000000
--- a/templates/components/agents/typescript/financial_report/tools.ts
+++ /dev/null
@@ -1,86 +0,0 @@
-import fs from "fs/promises";
-import { BaseToolWithCall, LlamaCloudIndex, QueryEngineTool } from "llamaindex";
-import path from "path";
-import { getDataSource } from "../engine";
-import { createTools } from "../engine/tools/index";
-
-export const getQueryEngineTools = async (
-  params?: any,
-): Promise<QueryEngineTool[] | null> => {
-  const topK = process.env.TOP_K ? parseInt(process.env.TOP_K) : undefined;
-
-  const index = await getDataSource(params);
-  if (!index) {
-    return null;
-  }
-  // index is LlamaCloudIndex use two query engine tools
-  if (index instanceof LlamaCloudIndex) {
-    return [
-      new QueryEngineTool({
-        queryEngine: index.asQueryEngine({
-          similarityTopK: topK,
-          retrieval_mode: "files_via_content",
-        }),
-        metadata: {
-          name: "document_retriever",
-          description: `Document retriever that retrieves entire documents from the corpus.
-  ONLY use for research questions that may require searching over entire research reports.
-  Will be slower and more expensive than chunk-level retrieval but may be necessary.`,
-        },
-      }),
-      new QueryEngineTool({
-        queryEngine: index.asQueryEngine({
-          similarityTopK: topK,
-          retrieval_mode: "chunks",
-        }),
-        metadata: {
-          name: "chunk_retriever",
-          description: `Retrieves a small set of relevant document chunks from the corpus.
-      Use for research questions that want to look up specific facts from the knowledge corpus,
-      and need entire documents.`,
-        },
-      }),
-    ];
-  } else {
-    return [
-      new QueryEngineTool({
-        queryEngine: (index as any).asQueryEngine({
-          similarityTopK: topK,
-        }),
-        metadata: {
-          name: "retriever",
-          description: `Use this tool to retrieve information about the text corpus from the index.`,
-        },
-      }),
-    ];
-  }
-};
-
-export const getAvailableTools = async () => {
-  const configFile = path.join("config", "tools.json");
-  let toolConfig: any;
-  const tools: BaseToolWithCall[] = [];
-  try {
-    toolConfig = JSON.parse(await fs.readFile(configFile, "utf8"));
-  } catch (e) {
-    console.info(`Could not read ${configFile} file. Using no tools.`);
-  }
-  if (toolConfig) {
-    tools.push(...(await createTools(toolConfig)));
-  }
-  const queryEngineTools = await getQueryEngineTools();
-  if (queryEngineTools) {
-    tools.push(...queryEngineTools);
-  }
-
-  return tools;
-};
-
-export const lookupTools = async (
-  toolNames: string[],
-): Promise<BaseToolWithCall[]> => {
-  const availableTools = await getAvailableTools();
-  return availableTools.filter((tool) =>
-    toolNames.includes(tool.metadata.name),
-  );
-};
diff --git a/templates/components/agents/typescript/financial_report/workflow/factory.ts b/templates/components/agents/typescript/financial_report/workflow/factory.ts
new file mode 100644
index 0000000000000000000000000000000000000000..deef2e17e8fe8e8bf0c8d59dfe60fd2db7cad489
--- /dev/null
+++ b/templates/components/agents/typescript/financial_report/workflow/factory.ts
@@ -0,0 +1,20 @@
+import { ChatMessage, ToolCallLLM } from "llamaindex";
+import { getTool } from "../engine/tools";
+import { FinancialReportWorkflow } from "./fin-report";
+import { getQueryEngineTools } from "./tools";
+
+const TIMEOUT = 360 * 1000;
+
+export async function createWorkflow(options: {
+  chatHistory: ChatMessage[];
+  llm?: ToolCallLLM;
+}) {
+  return new FinancialReportWorkflow({
+    chatHistory: options.chatHistory,
+    queryEngineTools: (await getQueryEngineTools()) || [],
+    codeInterpreterTool: (await getTool("interpreter"))!,
+    documentGeneratorTool: (await getTool("document_generator"))!,
+    llm: options.llm,
+    timeout: TIMEOUT,
+  });
+}
diff --git a/templates/components/agents/typescript/financial_report/workflow/fin-report.ts b/templates/components/agents/typescript/financial_report/workflow/fin-report.ts
new file mode 100644
index 0000000000000000000000000000000000000000..918d57470521233064c2561053ec144a78da9e91
--- /dev/null
+++ b/templates/components/agents/typescript/financial_report/workflow/fin-report.ts
@@ -0,0 +1,322 @@
+import {
+  HandlerContext,
+  StartEvent,
+  StopEvent,
+  Workflow,
+  WorkflowEvent,
+} from "@llamaindex/workflow";
+import {
+  BaseToolWithCall,
+  ChatMemoryBuffer,
+  ChatMessage,
+  ChatResponseChunk,
+  Settings,
+  ToolCall,
+  ToolCallLLM,
+} from "llamaindex";
+import { callTools, chatWithTools } from "./tools";
+import { AgentInput, AgentRunEvent } from "./type";
+
+// Create a custom event type
+class InputEvent extends WorkflowEvent<{ input: ChatMessage[] }> {}
+
+class ResearchEvent extends WorkflowEvent<{
+  toolCalls: ToolCall[];
+}> {}
+
+class AnalyzeEvent extends WorkflowEvent<{
+  input: ChatMessage | ToolCall[];
+}> {}
+
+class ReportGenerationEvent extends WorkflowEvent<{
+  toolCalls: ToolCall[];
+}> {}
+
+const DEFAULT_SYSTEM_PROMPT = `
+You are a financial analyst who is given a set of tools to help you.
+Use the appropriate tools for the user request and always use the information from the tools; don't make up anything yourself.
+For the query engine tool, you should break down the user request into a list of queries and call the tool with the queries.
+`;
+
+export class FinancialReportWorkflow extends Workflow<
+  null,
+  AgentInput,
+  ChatResponseChunk
+> {
+  llm: ToolCallLLM;
+  memory: ChatMemoryBuffer;
+  queryEngineTools: BaseToolWithCall[];
+  codeInterpreterTool: BaseToolWithCall;
+  documentGeneratorTool: BaseToolWithCall;
+  systemPrompt?: string;
+
+  constructor(options: {
+    llm?: ToolCallLLM;
+    chatHistory: ChatMessage[];
+    queryEngineTools: BaseToolWithCall[];
+    codeInterpreterTool: BaseToolWithCall;
+    documentGeneratorTool: BaseToolWithCall;
+    systemPrompt?: string;
+    verbose?: boolean;
+    timeout?: number;
+  }) {
+    super({
+      verbose: options?.verbose ?? false,
+      timeout: options?.timeout ?? 360,
+    });
+
+    this.llm = options.llm ?? (Settings.llm as ToolCallLLM);
+    if (!(this.llm instanceof ToolCallLLM)) {
+      throw new Error("LLM is not a ToolCallLLM");
+    }
+    this.systemPrompt = options.systemPrompt ?? DEFAULT_SYSTEM_PROMPT;
+    this.queryEngineTools = options.queryEngineTools;
+    this.codeInterpreterTool = options.codeInterpreterTool;
+
+    this.documentGeneratorTool = options.documentGeneratorTool;
+    this.memory = new ChatMemoryBuffer({
+      llm: this.llm,
+      chatHistory: options.chatHistory,
+    });
+
+    // Add steps
+    this.addStep(
+      {
+        inputs: [StartEvent<AgentInput>],
+        outputs: [InputEvent],
+      },
+      this.prepareChatHistory,
+    );
+
+    this.addStep(
+      {
+        inputs: [InputEvent],
+        outputs: [
+          InputEvent,
+          ResearchEvent,
+          AnalyzeEvent,
+          ReportGenerationEvent,
+          StopEvent,
+        ],
+      },
+      this.handleLLMInput,
+    );
+
+    this.addStep(
+      {
+        inputs: [ResearchEvent],
+        outputs: [AnalyzeEvent],
+      },
+      this.handleResearch,
+    );
+
+    this.addStep(
+      {
+        inputs: [AnalyzeEvent],
+        outputs: [InputEvent],
+      },
+      this.handleAnalyze,
+    );
+
+    this.addStep(
+      {
+        inputs: [ReportGenerationEvent],
+        outputs: [InputEvent],
+      },
+      this.handleReportGeneration,
+    );
+  }
+
+  prepareChatHistory = async (
+    ctx: HandlerContext<null>,
+    ev: StartEvent<AgentInput>,
+  ): Promise<InputEvent> => {
+    const { message } = ev.data;
+
+    if (this.systemPrompt) {
+      this.memory.put({ role: "system", content: this.systemPrompt });
+    }
+    this.memory.put({ role: "user", content: message });
+
+    return new InputEvent({ input: this.memory.getMessages() });
+  };
+
+  handleLLMInput = async (
+    ctx: HandlerContext<null>,
+    ev: InputEvent,
+  ): Promise<
+    | InputEvent
+    | ResearchEvent
+    | AnalyzeEvent
+    | ReportGenerationEvent
+    | StopEvent
+  > => {
+    const chatHistory = ev.data.input;
+
+    const tools = [this.codeInterpreterTool, this.documentGeneratorTool];
+    if (this.queryEngineTools) {
+      tools.push(...this.queryEngineTools);
+    }
+
+    const toolCallResponse = await chatWithTools(this.llm, tools, chatHistory);
+
+    if (!toolCallResponse.hasToolCall()) {
+      return new StopEvent(toolCallResponse.responseGenerator);
+    }
+
+    if (toolCallResponse.hasMultipleTools()) {
+      this.memory.put({
+        role: "assistant",
+        content:
+          "Calling different tools is not allowed. Please only use multiple calls of the same tool.",
+      });
+      return new InputEvent({ input: this.memory.getMessages() });
+    }
+
+    // Put the LLM tool call message into the memory
+    // And trigger the next step according to the tool call
+    if (toolCallResponse.toolCallMessage) {
+      this.memory.put(toolCallResponse.toolCallMessage);
+    }
+    const toolName = toolCallResponse.getToolNames()[0];
+    switch (toolName) {
+      case this.codeInterpreterTool.metadata.name:
+        return new AnalyzeEvent({
+          input: toolCallResponse.toolCalls,
+        });
+      case this.documentGeneratorTool.metadata.name:
+        return new ReportGenerationEvent({
+          toolCalls: toolCallResponse.toolCalls,
+        });
+      default:
+        if (
+          this.queryEngineTools &&
+          this.queryEngineTools.some((tool) => tool.metadata.name === toolName)
+        ) {
+          return new ResearchEvent({
+            toolCalls: toolCallResponse.toolCalls,
+          });
+        }
+        throw new Error(`Unknown tool: ${toolName}`);
+    }
+  };
+
+  handleResearch = async (
+    ctx: HandlerContext<null>,
+    ev: ResearchEvent,
+  ): Promise<AnalyzeEvent> => {
+    ctx.sendEvent(
+      new AgentRunEvent({
+        agent: "Researcher",
+        text: "Researching data",
+        type: "text",
+      }),
+    );
+
+    const { toolCalls } = ev.data;
+
+    const toolMsgs = await callTools({
+      tools: this.queryEngineTools,
+      toolCalls,
+      ctx,
+      agentName: "Researcher",
+    });
+    for (const toolMsg of toolMsgs) {
+      this.memory.put(toolMsg);
+    }
+    return new AnalyzeEvent({
+      input: {
+        role: "assistant",
+        content:
+          "I have finished researching the data, please analyze the data.",
+      },
+    });
+  };
+
+  /**
+   * Analyze a research result or a tool call for code interpreter from the LLM
+   */
+  handleAnalyze = async (
+    ctx: HandlerContext<null>,
+    ev: AnalyzeEvent,
+  ): Promise<InputEvent> => {
+    ctx.sendEvent(
+      new AgentRunEvent({
+        agent: "Analyst",
+        text: `Starting analysis`,
+        type: "text",
+      }),
+    );
+    // Requested by the workflow LLM, input is a list of tool calls
+    let toolCalls: ToolCall[] = [];
+    if (Array.isArray(ev.data.input)) {
+      toolCalls = ev.data.input;
+    } else {
+      // Requested by Researcher, input is a ChatMessage
+      // We start new LLM chat specifically for analyzing the data
+      const analysisPrompt = `
+      You are an expert in analyzing financial data.
+      You are given a set of financial data to analyze. Your task is to analyze the financial data and return a report.
+      Your response should include a detailed analysis of the financial data, including any trends, patterns, or insights that you find.
+      Construct the analysis in textual format; including tables would be great!
+      You don't need to synthesize the data; just analyze it and provide your findings.
+      `;
+
+      // Clone the current chat history
+      // Add the analysis system prompt and the message from the researcher
+      const newChatHistory = [
+        ...this.memory.getMessages(),
+        { role: "system", content: analysisPrompt },
+        ev.data.input,
+      ];
+      const toolCallResponse = await chatWithTools(
+        this.llm,
+        [this.codeInterpreterTool],
+        newChatHistory as ChatMessage[],
+      );
+
+      if (!toolCallResponse.hasToolCall()) {
+        this.memory.put(await toolCallResponse.asFullResponse());
+        return new InputEvent({
+          input: this.memory.getMessages(),
+        });
+      } else {
+        this.memory.put(toolCallResponse.toolCallMessage);
+        toolCalls = toolCallResponse.toolCalls;
+      }
+    }
+
+    // Call the tools
+    const toolMsgs = await callTools({
+      tools: [this.codeInterpreterTool],
+      toolCalls,
+      ctx,
+      agentName: "Analyst",
+    });
+    for (const toolMsg of toolMsgs) {
+      this.memory.put(toolMsg);
+    }
+
+    return new InputEvent({
+      input: this.memory.getMessages(),
+    });
+  };
+
+  handleReportGeneration = async (
+    ctx: HandlerContext<null>,
+    ev: ReportGenerationEvent,
+  ): Promise<InputEvent> => {
+    const { toolCalls } = ev.data;
+
+    const toolMsgs = await callTools({
+      tools: [this.documentGeneratorTool],
+      toolCalls,
+      ctx,
+      agentName: "Reporter",
+    });
+    for (const toolMsg of toolMsgs) {
+      this.memory.put(toolMsg);
+    }
+    return new InputEvent({ input: this.memory.getMessages() });
+  };
+}
diff --git a/templates/components/agents/typescript/form_filling/README-template.md b/templates/components/agents/typescript/form_filling/README-template.md
new file mode 100644
index 0000000000000000000000000000000000000000..5729bf2ee6d1afa6d5ad7afd983688be5c112f1c
--- /dev/null
+++ b/templates/components/agents/typescript/form_filling/README-template.md
@@ -0,0 +1,45 @@
+This is a [LlamaIndex](https://www.llamaindex.ai/) project using [Next.js](https://nextjs.org/) bootstrapped with [`create-llama`](https://github.com/run-llama/LlamaIndexTS/tree/main/packages/create-llama).
+
+## Getting Started
+
+First, install the dependencies:
+
+```
+npm install
+```
+
+Then check the parameters that have been pre-configured in the `.env` file in this directory.
+Make sure you have the `OPENAI_API_KEY` set.
+
+Second, run the development server:
+
+```
+npm run dev
+```
+
+Open [http://localhost:3000](http://localhost:3000) with your browser to see the chat UI.
+
+## Use Case: Filling Financial CSV Template
+
+1. Upload the Apple and Tesla financial reports from the [data](./data) directory, then just send an empty message.
+2. Upload the CSV file [sec_10k_template.csv](./sec_10k_template.csv) and send the message "Fill the missing cells in the CSV file".
+
+The agent will fill in the missing cells by retrieving the information from the uploaded financial reports and return a new CSV file with the cells filled in.
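+
+If you prefer to script this flow, you can also test the `/api/chat` endpoint directly. Here is a minimal sketch (this assumes the reports and the CSV template were already uploaded through the chat UI as described above):
+
+```
+curl --location 'localhost:3000/api/chat' \
+--header 'Content-Type: application/json' \
+--data '{ "messages": [{ "role": "user", "content": "Fill the missing cells in the CSV file" }] }'
+```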
+
+## Learn More
+
+To learn more about LlamaIndex, take a look at the following resources:
+
+- [LlamaIndex Documentation](https://docs.llamaindex.ai) - learn about LlamaIndex (Python features).
+- [LlamaIndexTS Documentation](https://ts.llamaindex.ai/docs/llamaindex) - learn about LlamaIndex (TypeScript features).
+- [Workflows Introduction](https://ts.llamaindex.ai/docs/llamaindex/guide/workflow) - learn about LlamaIndexTS workflows.
+
+You can check out [the LlamaIndexTS GitHub repository](https://github.com/run-llama/LlamaIndexTS) - your feedback and contributions are welcome!
diff --git a/templates/components/agents/typescript/form_filling/sec_10k_template.csv b/templates/components/agents/typescript/form_filling/sec_10k_template.csv
new file mode 100644
index 0000000000000000000000000000000000000000..ae920ab8ac17b81ca0836e4a31fa28752c82c449
--- /dev/null
+++ b/templates/components/agents/typescript/form_filling/sec_10k_template.csv
@@ -0,0 +1,17 @@
+Parameter,2023 Apple (AAPL),2023 Tesla (TSLA)
+Revenue,,
+Net Income,,
+Earnings Per Share (EPS),,
+Debt-to-Equity Ratio,,
+Current Ratio,,
+Gross Margin,,
+Operating Margin,,
+Net Profit Margin,,
+Inventory Turnover,,
+Accounts Receivable Turnover,,
+Capital Expenditure,,
+Research and Development Expense,,
+Market Cap,,
+Price to Earnings Ratio,,
+Dividend Yield,,
+Year-over-Year Growth Rate,,
diff --git a/templates/components/agents/typescript/form_filling/workflow/factory.ts b/templates/components/agents/typescript/form_filling/workflow/factory.ts
new file mode 100644
index 0000000000000000000000000000000000000000..c38220d403a7ecb249e6031458698ced4c716bc9
--- /dev/null
+++ b/templates/components/agents/typescript/form_filling/workflow/factory.ts
@@ -0,0 +1,20 @@
+import { ChatMessage, ToolCallLLM } from "llamaindex";
+import { getTool } from "../engine/tools";
+import { FormFillingWorkflow } from "./form-filling";
+import { getQueryEngineTools } from "./tools";
+
+const TIMEOUT = 360 * 1000;
+
+export async function createWorkflow(options: {
+  chatHistory: ChatMessage[];
+  llm?: ToolCallLLM;
+}) {
+  return new FormFillingWorkflow({
+    chatHistory: options.chatHistory,
+    queryEngineTools: (await getQueryEngineTools()) || [],
+    extractorTool: (await getTool("extract_missing_cells"))!,
+    fillMissingCellsTool: (await getTool("fill_missing_cells"))!,
+    llm: options.llm,
+    timeout: TIMEOUT,
+  });
+}
diff --git a/templates/components/agents/typescript/form_filling/workflow/form-filling.ts b/templates/components/agents/typescript/form_filling/workflow/form-filling.ts
new file mode 100644
index 0000000000000000000000000000000000000000..3f92aa56889124c0fb5c22a20d16c347e3dca693
--- /dev/null
+++ b/templates/components/agents/typescript/form_filling/workflow/form-filling.ts
@@ -0,0 +1,275 @@
+import {
+  HandlerContext,
+  StartEvent,
+  StopEvent,
+  Workflow,
+  WorkflowEvent,
+} from "@llamaindex/workflow";
+import {
+  BaseToolWithCall,
+  ChatMemoryBuffer,
+  ChatMessage,
+  ChatResponseChunk,
+  Settings,
+  ToolCall,
+  ToolCallLLM,
+} from "llamaindex";
+import { callTools, chatWithTools } from "./tools";
+import { AgentInput, AgentRunEvent } from "./type";
+
+// Create a custom event type
+class InputEvent extends WorkflowEvent<{ input: ChatMessage[] }> {}
+
+class ExtractMissingCellsEvent extends WorkflowEvent<{
+  toolCalls: ToolCall[];
+}> {}
+
+class FindAnswersEvent extends WorkflowEvent<{
+  toolCalls: ToolCall[];
+}> {}
+
+class FillMissingCellsEvent extends WorkflowEvent<{
+  toolCalls: ToolCall[];
+}> {}
+
+const DEFAULT_SYSTEM_PROMPT = `
+You are a helpful assistant who helps fill missing cells in a CSV file.
+Only use the information from the retriever tool; don't make up any information yourself. Fill in N/A if an answer is not found.
+If there is no retriever tool, or the gathered information has many N/A values indicating the questions don't match the data, respond with a warning and ask the user to upload a different file or connect to a knowledge base.
+You can make multiple tool calls at once, but they must all use the same tool.
+Only use the local file path for the tools.
+`;
+
+export class FormFillingWorkflow extends Workflow<
+  null,
+  AgentInput,
+  ChatResponseChunk
+> {
+  llm: ToolCallLLM;
+  memory: ChatMemoryBuffer;
+  extractorTool: BaseToolWithCall;
+  queryEngineTools?: BaseToolWithCall[];
+  fillMissingCellsTool: BaseToolWithCall;
+  systemPrompt?: string;
+
+  constructor(options: {
+    llm?: ToolCallLLM;
+    chatHistory: ChatMessage[];
+    extractorTool: BaseToolWithCall;
+    queryEngineTools?: BaseToolWithCall[];
+    fillMissingCellsTool: BaseToolWithCall;
+    systemPrompt?: string;
+    verbose?: boolean;
+    timeout?: number;
+  }) {
+    super({
+      verbose: options?.verbose ?? false,
+      timeout: options?.timeout ?? 360,
+    });
+
+    this.llm = options.llm ?? (Settings.llm as ToolCallLLM);
+    if (!(this.llm instanceof ToolCallLLM)) {
+      throw new Error("LLM is not a ToolCallLLM");
+    }
+    this.systemPrompt = options.systemPrompt ?? DEFAULT_SYSTEM_PROMPT;
+    this.extractorTool = options.extractorTool;
+    this.queryEngineTools = options.queryEngineTools;
+    this.fillMissingCellsTool = options.fillMissingCellsTool;
+
+    this.memory = new ChatMemoryBuffer({
+      llm: this.llm,
+      chatHistory: options.chatHistory,
+    });
+
+    // Add steps
+    this.addStep(
+      {
+        inputs: [StartEvent<AgentInput>],
+        outputs: [InputEvent],
+      },
+      this.prepareChatHistory,
+    );
+
+    this.addStep(
+      {
+        inputs: [InputEvent],
+        outputs: [
+          InputEvent,
+          ExtractMissingCellsEvent,
+          FindAnswersEvent,
+          FillMissingCellsEvent,
+          StopEvent,
+        ],
+      },
+      this.handleLLMInput,
+    );
+
+    this.addStep(
+      {
+        inputs: [ExtractMissingCellsEvent],
+        outputs: [InputEvent],
+      },
+      this.handleExtractMissingCells,
+    );
+
+    this.addStep(
+      {
+        inputs: [FindAnswersEvent],
+        outputs: [InputEvent],
+      },
+      this.handleFindAnswers,
+    );
+
+    this.addStep(
+      {
+        inputs: [FillMissingCellsEvent],
+        outputs: [InputEvent],
+      },
+      this.handleFillMissingCells,
+    );
+  }
+
+  prepareChatHistory = async (
+    ctx: HandlerContext<null>,
+    ev: StartEvent<AgentInput>,
+  ): Promise<InputEvent> => {
+    const { message } = ev.data;
+
+    if (this.systemPrompt) {
+      this.memory.put({ role: "system", content: this.systemPrompt });
+    }
+    this.memory.put({ role: "user", content: message });
+
+    return new InputEvent({ input: this.memory.getMessages() });
+  };
+
+  handleLLMInput = async (
+    ctx: HandlerContext<null>,
+    ev: InputEvent,
+  ): Promise<
+    | InputEvent
+    | ExtractMissingCellsEvent
+    | FindAnswersEvent
+    | FillMissingCellsEvent
+    | StopEvent
+  > => {
+    const chatHistory = ev.data.input;
+
+    const tools = [this.extractorTool, this.fillMissingCellsTool];
+    if (this.queryEngineTools) {
+      tools.push(...this.queryEngineTools);
+    }
+
+    const toolCallResponse = await chatWithTools(this.llm, tools, chatHistory);
+
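+    // No tool call requested: stream the LLM response straight to the client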
+    if (!toolCallResponse.hasToolCall()) {
+      return new StopEvent(toolCallResponse.responseGenerator);
+    }
+
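+    // Parallel calls are only supported for a single tool; ask the LLM to retry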
+    if (toolCallResponse.hasMultipleTools()) {
+      this.memory.put({
+        role: "assistant",
+        content:
+          "Calling different tools is not allowed. Please only use multiple calls of the same tool.",
+      });
+      return new InputEvent({ input: this.memory.getMessages() });
+    }
+
+    // Put the LLM tool call message into the memory
+    // And trigger the next step according to the tool call
+    if (toolCallResponse.toolCallMessage) {
+      this.memory.put(toolCallResponse.toolCallMessage);
+    }
+    const toolName = toolCallResponse.getToolNames()[0];
+    switch (toolName) {
+      case this.extractorTool.metadata.name:
+        return new ExtractMissingCellsEvent({
+          toolCalls: toolCallResponse.toolCalls,
+        });
+      case this.fillMissingCellsTool.metadata.name:
+        return new FillMissingCellsEvent({
+          toolCalls: toolCallResponse.toolCalls,
+        });
+      default:
+        if (
+          this.queryEngineTools &&
+          this.queryEngineTools.some((tool) => tool.metadata.name === toolName)
+        ) {
+          return new FindAnswersEvent({
+            toolCalls: toolCallResponse.toolCalls,
+          });
+        }
+        throw new Error(`Unknown tool: ${toolName}`);
+    }
+  };
+
+  handleExtractMissingCells = async (
+    ctx: HandlerContext<null>,
+    ev: ExtractMissingCellsEvent,
+  ): Promise<InputEvent> => {
+    ctx.sendEvent(
+      new AgentRunEvent({
+        agent: "CSVExtractor",
+        text: "Extracting missing cells",
+        type: "text",
+      }),
+    );
+    const { toolCalls } = ev.data;
+    const toolMsgs = await callTools({
+      tools: [this.extractorTool],
+      toolCalls,
+      ctx,
+      agentName: "CSVExtractor",
+    });
+    for (const toolMsg of toolMsgs) {
+      this.memory.put(toolMsg);
+    }
+    return new InputEvent({ input: this.memory.getMessages() });
+  };
+
+  handleFindAnswers = async (
+    ctx: HandlerContext<null>,
+    ev: FindAnswersEvent,
+  ): Promise<InputEvent> => {
+    const { toolCalls } = ev.data;
+    if (!this.queryEngineTools) {
+      throw new Error("Query engine tool is not available");
+    }
+    ctx.sendEvent(
+      new AgentRunEvent({
+        agent: "Researcher",
+        text: "Finding answers",
+        type: "text",
+      }),
+    );
+    const toolMsgs = await callTools({
+      tools: this.queryEngineTools,
+      toolCalls,
+      ctx,
+      agentName: "Researcher",
+    });
+
+    for (const toolMsg of toolMsgs) {
+      this.memory.put(toolMsg);
+    }
+    return new InputEvent({ input: this.memory.getMessages() });
+  };
+
+  handleFillMissingCells = async (
+    ctx: HandlerContext<null>,
+    ev: FillMissingCellsEvent,
+  ): Promise<InputEvent> => {
+    const { toolCalls } = ev.data;
+
+    const toolMsgs = await callTools({
+      tools: [this.fillMissingCellsTool],
+      toolCalls,
+      ctx,
+      agentName: "Processor",
+    });
+    for (const toolMsg of toolMsgs) {
+      this.memory.put(toolMsg);
+    }
+    return new InputEvent({ input: this.memory.getMessages() });
+  };
+}
diff --git a/templates/components/engines/typescript/agent/tools/form-filling.ts b/templates/components/engines/typescript/agent/tools/form-filling.ts
new file mode 100644
index 0000000000000000000000000000000000000000..6ac0a5253ed675744cdfb91f4b4897d9cef2bec7
--- /dev/null
+++ b/templates/components/engines/typescript/agent/tools/form-filling.ts
@@ -0,0 +1,296 @@
+import { JSONSchemaType } from "ajv";
+import fs from "fs";
+import { BaseTool, Settings, ToolMetadata } from "llamaindex";
+import Papa from "papaparse";
+import path from "path";
+import { saveDocument } from "../../llamaindex/documents/helper";
+
+type ExtractMissingCellsParameter = {
+  filePath: string;
+};
+
+export type MissingCell = {
+  rowIndex: number;
+  columnIndex: number;
+  question: string;
+};
+
+const CSV_EXTRACTION_PROMPT = `You are a data analyst. You are given a table with missing cells.
+Your task is to identify the missing cells and the questions needed to fill them.
+IMPORTANT: Column indices are 0-based and include the row-index column, so the first data column has index 1.
+
+# Instructions:
+- Understand the entire content of the table and the topics of the table.
+- Identify the missing cells and the meaning of the data in the cells.
+- For each missing cell, provide the row index and the correct column index (remember: the row-index column is column 0, so the first data column is column 1).
+- For each missing cell, provide the question needed to fill the cell (the question must be relevant to the topic of the table).
+- Since the cell's value should be concise, the question should request a numerical answer or a specific value.
+- Finally, only return the answer in JSON format with the following schema:
+{
+  "missing_cells": [
+    {
+      "rowIndex": number,
+      "columnIndex": number,
+      "question": string
+    }
+  ]
+}
+- If there are no missing cells, return an empty array.
+- Return only the JSON object, nothing else, and don't wrap it inside a markdown code block.
+
+# Example:
+# |    | Name | Age | City |
+# |----|------|-----|------|
+# | 0  | John | 25  | Paris|
+# | 1  | Mary |     |      |
+# | 2  | Anna | 30  | Rome |
+#
+# Your thoughts:
+# - The table is about people's names, ages, and cities.
+# - Row: 1, Column: 2 (Age column), Question: "How old is Mary? Please provide only the numerical answer."
+# - Row: 1, Column: 3 (City column), Question: "In which city does Mary live? Please provide only the city name."
+# Your answer:
+# {
+#   "missing_cells": [
+#     {
+#       "rowIndex": 1,
+#       "columnIndex": 2,
+#       "question": "How old is Mary? Please provide only the numerical answer."
+#     },
+#     {
+#       "rowIndex": 1,
+#       "columnIndex": 3,
+#       "question": "In which city does Mary live? Please provide only the city name."
+#     }
+#   ]
+# }
+
+
+# Here is your task:
+
+- Table content:
+{table_content}
+
+- Your answer:
+`;
+
+const DEFAULT_METADATA: ToolMetadata<
+  JSONSchemaType<ExtractMissingCellsParameter>
+> = {
+  name: "extract_missing_cells",
+  description: `Use this tool to extract missing cells in a CSV file and generate questions to fill them. This tool only works with a local file path.`,
+  parameters: {
+    type: "object",
+    properties: {
+      filePath: {
+        type: "string",
+        description: "The local file path to the CSV file.",
+      },
+    },
+    required: ["filePath"],
+  },
+};
+
+export interface ExtractMissingCellsParams {
+  metadata?: ToolMetadata<JSONSchemaType<ExtractMissingCellsParameter>>;
+}
+
+export class ExtractMissingCellsTool
+  implements BaseTool<ExtractMissingCellsParameter>
+{
+  metadata: ToolMetadata<JSONSchemaType<ExtractMissingCellsParameter>>;
+  defaultExtractionPrompt: string;
+
+  constructor(params: ExtractMissingCellsParams) {
+    this.metadata = params.metadata ?? DEFAULT_METADATA;
+    this.defaultExtractionPrompt = CSV_EXTRACTION_PROMPT;
+  }
+
+  private readCsvFile(filePath: string): Promise<string[][]> {
+    return new Promise((resolve, reject) => {
+      fs.readFile(filePath, "utf8", (err, data) => {
+        if (err) {
+          reject(err);
+          return;
+        }
+
+        const parsedData = Papa.parse<string[]>(data, {
+          skipEmptyLines: false,
+        });
+
+        if (parsedData.errors.length) {
+          reject(parsedData.errors);
+          return;
+        }
+
+        // Guard against empty files, then pad all rows to the header's width
+        if (parsedData.data.length === 0) {
+          reject(new Error("CSV file is empty"));
+          return;
+        }
+        const maxColumns = parsedData.data[0].length;
+        const paddedRows = parsedData.data.map((row) => {
+          return [...row, ...Array(maxColumns - row.length).fill("")];
+        });
+
+        resolve(paddedRows);
+      });
+    });
+  }
+
+  private formatToMarkdownTable(data: string[][]): string {
+    if (data.length === 0) return "";
+
+    const maxColumns = data[0].length;
+
+    const headerRow = `| ${data[0].join(" | ")} |`;
+    const separatorRow = `| ${Array(maxColumns).fill("---").join(" | ")} |`;
+
+    const dataRows = data.slice(1).map((row) => {
+      return `| ${row.join(" | ")} |`;
+    });
+
+    return [headerRow, separatorRow, ...dataRows].join("\n");
+  }
+
+  async call(input: ExtractMissingCellsParameter): Promise<MissingCell[]> {
+    const { filePath } = input;
+    let tableContent: string[][];
+    try {
+      tableContent = await this.readCsvFile(filePath);
+    } catch (error) {
+      throw new Error(
+        `Failed to read CSV file. Make sure that you are passing a local file path (not a sandbox path). Error: ${error}`,
+      );
+    }
+
+    const prompt = this.defaultExtractionPrompt.replace(
+      "{table_content}",
+      this.formatToMarkdownTable(tableContent),
+    );
+
+    const llm = Settings.llm;
+    const response = await llm.complete({
+      prompt,
+    });
+    const rawAnswer = response.text;
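+    // The prompt asks for bare JSON; JSON.parse throws if the model wraps
+    // the answer in a markdown code block anyway.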
+    const parsedResponse = JSON.parse(rawAnswer) as {
+      missing_cells: MissingCell[];
+    };
+    if (!parsedResponse.missing_cells) {
+      throw new Error(
+        "The answer is not in the correct format. There should be a missing_cells array.",
+      );
+    }
+    const answer = parsedResponse.missing_cells;
+
+    return answer;
+  }
+}
+
+type FillMissingCellsParameter = {
+  filePath: string;
+  cells: {
+    rowIndex: number;
+    columnIndex: number;
+    answer: string;
+  }[];
+};
+
+const FILL_CELLS_METADATA: ToolMetadata<
+  JSONSchemaType<FillMissingCellsParameter>
+> = {
+  name: "fill_missing_cells",
+  description: `Use this tool to fill missing cells in a CSV file with provided answers. This tool only works with a local file path.`,
+  parameters: {
+    type: "object",
+    properties: {
+      filePath: {
+        type: "string",
+        description: "The local file path to the CSV file.",
+      },
+      cells: {
+        type: "array",
+        items: {
+          type: "object",
+          properties: {
+            rowIndex: { type: "number" },
+            columnIndex: { type: "number" },
+            answer: { type: "string" },
+          },
+          required: ["rowIndex", "columnIndex", "answer"],
+        },
+        description: "Array of cells to fill with their answers",
+      },
+    },
+    required: ["filePath", "cells"],
+  },
+};
+
+export interface FillMissingCellsParams {
+  metadata?: ToolMetadata<JSONSchemaType<FillMissingCellsParameter>>;
+}
+
+export class FillMissingCellsTool
+  implements BaseTool<FillMissingCellsParameter>
+{
+  metadata: ToolMetadata<JSONSchemaType<FillMissingCellsParameter>>;
+
+  constructor(params: FillMissingCellsParams = {}) {
+    this.metadata = params.metadata ?? FILL_CELLS_METADATA;
+  }
+
+  async call(input: FillMissingCellsParameter): Promise<string> {
+    const { filePath, cells } = input;
+
+    // Read the CSV file
+    const fileContent = await new Promise<string>((resolve, reject) => {
+      fs.readFile(filePath, "utf8", (err, data) => {
+        if (err) {
+          reject(err);
+        } else {
+          resolve(data);
+        }
+      });
+    });
+
+    // Parse CSV with PapaParse
+    const parseResult = Papa.parse<string[]>(fileContent, {
+      header: false, // Ensure the header is not treated as a separate object
+      skipEmptyLines: false, // Ensure empty lines are not skipped
+    });
+
+    if (parseResult.errors.length) {
+      throw new Error(
+        "Failed to parse CSV file: " + parseResult.errors[0].message,
+      );
+    }
+
+    const rows = parseResult.data;
+
+    // Fill the cells with answers
+    for (const cell of cells) {
+      // Data rows start at index 1 in the parsed rows; index 0 is the header row
+      const adjustedRowIndex = cell.rowIndex + 1;
+      if (
+        adjustedRowIndex < rows.length &&
+        cell.columnIndex < rows[adjustedRowIndex].length
+      ) {
+        rows[adjustedRowIndex][cell.columnIndex] = cell.answer;
+      }
+    }
+
+    // Convert back to CSV format
+    const updatedContent = Papa.unparse(rows, {
+      delimiter: parseResult.meta.delimiter,
+    });
+
+    // Use the helper function to write the file
+    const parsedPath = path.parse(filePath);
+    const newFileName = `${parsedPath.name}-filled${parsedPath.ext}`;
+    const newFilePath = path.join("output/tools", newFileName);
+
+    const newFileUrl = await saveDocument(newFilePath, updatedContent);
+
+    return (
+      "Successfully filled missing cells in the CSV file. File URL to show to the user: " +
+      newFileUrl
+    );
+  }
+}
diff --git a/templates/components/engines/typescript/agent/tools/index.ts b/templates/components/engines/typescript/agent/tools/index.ts
index 062e2eb04aad6634dd4b7e99ebf0d5c14c2e6c57..c49de77f0ef6e25de2ccad15fd151a9050b81eb0 100644
--- a/templates/components/engines/typescript/agent/tools/index.ts
+++ b/templates/components/engines/typescript/agent/tools/index.ts
@@ -1,11 +1,19 @@
 import { BaseToolWithCall } from "llamaindex";
 import { ToolsFactory } from "llamaindex/tools/ToolsFactory";
+import fs from "node:fs/promises";
+import path from "node:path";
 import { CodeGeneratorTool, CodeGeneratorToolParams } from "./code-generator";
 import {
   DocumentGenerator,
   DocumentGeneratorParams,
 } from "./document-generator";
 import { DuckDuckGoSearchTool, DuckDuckGoToolParams } from "./duckduckgo";
+import {
+  ExtractMissingCellsParams,
+  ExtractMissingCellsTool,
+  FillMissingCellsParams,
+  FillMissingCellsTool,
+} from "./form-filling";
 import { ImgGeneratorTool, ImgGeneratorToolParams } from "./img-gen";
 import { InterpreterTool, InterpreterToolParams } from "./interpreter";
 import { OpenAPIActionTool } from "./openapi-action";
@@ -54,6 +62,12 @@ const toolFactory: Record<string, ToolCreator> = {
   document_generator: async (config: unknown) => {
     return [new DocumentGenerator(config as DocumentGeneratorParams)];
   },
+  form_filling: async (config: unknown) => {
+    return [
+      new ExtractMissingCellsTool(config as ExtractMissingCellsParams),
+      new FillMissingCellsTool(config as FillMissingCellsParams),
+    ];
+  },
 };
 
 async function createLocalTools(
@@ -70,3 +84,19 @@ async function createLocalTools(
 
   return tools;
 }
+
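+// Load the tools config (tools.json under the given path, default "config")
+// and instantiate every configured tool.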
+export async function getConfiguredTools(
+  configPath?: string,
+): Promise<BaseToolWithCall[]> {
+  const configFile = path.join(configPath ?? "config", "tools.json");
+  const toolConfig = JSON.parse(await fs.readFile(configFile, "utf8"));
+  const tools = await createTools(toolConfig);
+  return tools;
+}
+
+export async function getTool(
+  toolName: string,
+): Promise<BaseToolWithCall | undefined> {
+  const tools = await getConfiguredTools();
+  return tools.find((tool) => tool.metadata.name === toolName);
+}
diff --git a/templates/components/llamaindex/typescript/documents/helper.ts b/templates/components/llamaindex/typescript/documents/helper.ts
index 44e0520b722dcb3a7fb8928bdd215006841be961..a6d18c9e264b7fcd3a7be0fc98fb456919ab5dff 100644
--- a/templates/components/llamaindex/typescript/documents/helper.ts
+++ b/templates/components/llamaindex/typescript/documents/helper.ts
@@ -13,7 +13,7 @@ const MIME_TYPE_TO_EXT: Record<string, string> = {
     "docx",
 };
 
-const UPLOADED_FOLDER = "output/uploaded";
+export const UPLOADED_FOLDER = "output/uploaded";
 
 export async function storeAndParseFile(
   name: string,
diff --git a/templates/components/llamaindex/typescript/documents/upload.ts b/templates/components/llamaindex/typescript/documents/upload.ts
index 05070cf3dc62eb9cc417ee53f67f8034f87f9d87..b3786a377aa92dba154394a5099b0503ce89fa51 100644
--- a/templates/components/llamaindex/typescript/documents/upload.ts
+++ b/templates/components/llamaindex/typescript/documents/upload.ts
@@ -1,7 +1,5 @@
 import { Document, LLamaCloudFileService, VectorStoreIndex } from "llamaindex";
 import { LlamaCloudIndex } from "llamaindex/cloud/LlamaCloudIndex";
-import fs from "node:fs/promises";
-import path from "node:path";
 import { DocumentFile } from "../streaming/annotations";
 import { parseFile, storeFile } from "./helper";
 import { runPipeline } from "./pipeline";
@@ -18,8 +16,8 @@ export async function uploadDocument(
   // Store file
   const fileMetadata = await storeFile(name, fileBuffer, mimeType);
 
-  // If the file is csv and has codeExecutorTool, we don't need to index the file.
-  if (mimeType === "text/csv" && (await hasCodeExecutorTool())) {
+  // Do not index CSV files; tools read them directly from the local path
+  if (mimeType === "text/csv") {
     return fileMetadata;
   }
   let documentIds: string[] = [];
@@ -61,14 +59,3 @@ export async function uploadDocument(
   fileMetadata.refs = documentIds;
   return fileMetadata;
 }
-
-const hasCodeExecutorTool = async () => {
-  const codeExecutorTools = ["interpreter", "artifact"];
-
-  const configFile = path.join("config", "tools.json");
-  const toolConfig = JSON.parse(await fs.readFile(configFile, "utf8"));
-
-  const localTools = toolConfig.local || {};
-  // Check if local tools contains codeExecutorTools
-  return codeExecutorTools.some((tool) => localTools[tool] !== undefined);
-};
diff --git a/templates/components/llamaindex/typescript/streaming/annotations.ts b/templates/components/llamaindex/typescript/streaming/annotations.ts
index bf7e46ab55d8a7275c5ce1dfd766578dcfcd01b2..164ddca6b7e94904428a070b86783d490a8f9b3f 100644
--- a/templates/components/llamaindex/typescript/streaming/annotations.ts
+++ b/templates/components/llamaindex/typescript/streaming/annotations.ts
@@ -1,5 +1,11 @@
 import { JSONValue, Message } from "ai";
-import { MessageContent, MessageContentDetail } from "llamaindex";
+import {
+  ChatMessage,
+  MessageContent,
+  MessageContentDetail,
+  MessageType,
+} from "llamaindex";
+import { UPLOADED_FOLDER } from "../documents/helper";
 
 export type DocumentFileType = "csv" | "pdf" | "txt" | "docx";
 
@@ -58,6 +64,45 @@ export function retrieveMessageContent(messages: Message[]): MessageContent {
   ];
 }
 
+export function convertToChatHistory(messages: Message[]): ChatMessage[] {
+  if (!messages || !Array.isArray(messages)) {
+    return [];
+  }
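+  // If the history carries agent annotations, fold them into a single
+  // assistant message that replaces the last message, so the agents'
+  // previous conversation is preserved as plain chat history.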
+  const agentHistory = retrieveAgentHistoryMessage(messages);
+  if (agentHistory) {
+    const previousMessages = messages.slice(0, -1);
+    return [...previousMessages, agentHistory].map((msg) => ({
+      role: msg.role as MessageType,
+      content: msg.content,
+    }));
+  }
+  return messages.map((msg) => ({
+    role: msg.role as MessageType,
+    content: msg.content,
+  }));
+}
+
+function retrieveAgentHistoryMessage(
+  messages: Message[],
+  maxAgentMessages = 10,
+): ChatMessage | null {
+  const agentAnnotations = getAnnotations<{ agent: string; text: string }>(
+    messages,
+    { role: "assistant", type: "agent" },
+  ).slice(-maxAgentMessages);
+
+  if (agentAnnotations.length > 0) {
+    const messageContent =
+      "Here is the previous conversation of agents:\n" +
+      agentAnnotations.map((annotation) => annotation.data.text).join("\n");
+    return {
+      role: "assistant",
+      content: messageContent,
+    };
+  }
+  return null;
+}
+
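+// Sketch of the message validator imported by the Next.js route (assumed
+// semantics): the request must end with a user message.
+export function isValidMessages(messages: Message[]): boolean {
+  const lastMessage =
+    messages && messages.length > 0 ? messages[messages.length - 1] : null;
+  return lastMessage !== null && lastMessage.role === "user";
+}
+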
 function getFileContent(file: DocumentFile): string {
   let defaultContent = `=====File: ${file.name}=====\n`;
   // Include file URL if it's available
@@ -84,6 +129,10 @@ function getFileContent(file: DocumentFile): string {
   const sandboxFilePath = `/tmp/${file.name}`;
   defaultContent += `Sandbox file path (instruction: only use sandbox path for artifact or code interpreter tool): ${sandboxFilePath}\n`;
 
+  // Include local file path
+  const localFilePath = `${UPLOADED_FOLDER}/${file.name}`;
+  defaultContent += `Local file path (instruction: use for local tool that requires a local path): ${localFilePath}\n`;
+
   return defaultContent;
 }
 
@@ -127,13 +176,10 @@ function retrieveLatestArtifact(messages: Message[]): MessageContentDetail[] {
 }
 
 function convertAnnotations(messages: Message[]): MessageContentDetail[] {
-  // annotations from the last user message that has annotations
-  const annotations: Annotation[] =
-    messages
-      .slice()
-      .reverse()
-      .find((message) => message.role === "user" && message.annotations)
-      ?.annotations?.map(getValidAnnotation) || [];
+  // get all annotations from user messages
+  const annotations: Annotation[] = messages
+    .filter((message) => message.role === "user" && message.annotations)
+    .flatMap((message) => message.annotations?.map(getValidAnnotation) || []);
   if (annotations.length === 0) return [];
 
   const content: MessageContentDetail[] = [];
diff --git a/templates/components/multiagent/typescript/express/chat.controller.ts b/templates/components/multiagent/typescript/express/chat.controller.ts
index 8dfaf6c4ba2437311d32c74991a804bad17b68fb..eaf11bcd302a35783ebda0f4a8b7965041683775 100644
--- a/templates/components/multiagent/typescript/express/chat.controller.ts
+++ b/templates/components/multiagent/typescript/express/chat.controller.ts
@@ -1,36 +1,34 @@
-import { StopEvent } from "@llamaindex/core/workflow";
 import { Message, streamToResponse } from "ai";
 import { Request, Response } from "express";
-import { ChatResponseChunk } from "llamaindex";
+import {
+  convertToChatHistory,
+  retrieveMessageContent,
+} from "./llamaindex/streaming/annotations";
 import { createWorkflow } from "./workflow/factory";
-import { toDataStream, workflowEventsToStreamData } from "./workflow/stream";
+import { createStreamFromWorkflowContext } from "./workflow/stream";
 
 export const chat = async (req: Request, res: Response) => {
   try {
-    const { messages, data }: { messages: Message[]; data?: any } = req.body;
-    const userMessage = messages.pop();
-    if (!messages || !userMessage || userMessage.role !== "user") {
+    const { messages }: { messages: Message[] } = req.body;
+    if (!messages || messages.length === 0) {
       return res.status(400).json({
-        error:
-          "messages are required in the request body and the last message must be from the user",
+        error: "messages are required in the request body",
       });
     }
+    const chatHistory = convertToChatHistory(messages);
+    const userMessageContent = retrieveMessageContent(messages);
 
-    const agent = createWorkflow(messages, data);
-    const result = agent.run<AsyncGenerator<ChatResponseChunk>>(
-      userMessage.content,
-    ) as unknown as Promise<StopEvent<AsyncGenerator<ChatResponseChunk>>>;
+    const workflow = await createWorkflow({ chatHistory });
 
-    // convert the workflow events to a vercel AI stream data object
-    const agentStreamData = await workflowEventsToStreamData(
-      agent.streamEvents(),
-    );
-    // convert the workflow result to a vercel AI content stream
-    const stream = toDataStream(result, {
-      onFinal: () => agentStreamData.close(),
+    const context = workflow.run({
+      message: userMessageContent,
+      streaming: true,
     });
 
-    return streamToResponse(stream, res, {}, agentStreamData);
+    const { stream, dataStream } =
+      await createStreamFromWorkflowContext(context);
+
+    return streamToResponse(stream, res, {}, dataStream);
   } catch (error) {
     console.error("[LlamaIndex]", error);
     return res.status(500).json({
diff --git a/templates/components/multiagent/typescript/nextjs/route.ts b/templates/components/multiagent/typescript/nextjs/route.ts
index 2f93e0f7acc2904a5fb7498668a39efc86665f1b..cdd5561fc7024fa88b0f527f8813ebd3e81b611d 100644
--- a/templates/components/multiagent/typescript/nextjs/route.ts
+++ b/templates/components/multiagent/typescript/nextjs/route.ts
@@ -1,11 +1,14 @@
 import { initObservability } from "@/app/observability";
-import { StopEvent } from "@llamaindex/core/workflow";
-import { Message, StreamingTextResponse } from "ai";
-import { ChatResponseChunk } from "llamaindex";
+import { StreamingTextResponse, type Message } from "ai";
 import { NextRequest, NextResponse } from "next/server";
 import { initSettings } from "./engine/settings";
+import {
+  convertToChatHistory,
+  isValidMessages,
+  retrieveMessageContent,
+} from "./llamaindex/streaming/annotations";
 import { createWorkflow } from "./workflow/factory";
-import { toDataStream, workflowEventsToStreamData } from "./workflow/stream";
+import { createStreamFromWorkflowContext } from "./workflow/stream";
 
 initObservability();
 initSettings();
@@ -16,9 +19,8 @@ export const dynamic = "force-dynamic";
 export async function POST(request: NextRequest) {
   try {
     const body = await request.json();
-    const { messages, data }: { messages: Message[]; data?: any } = body;
-    const userMessage = messages.pop();
-    if (!messages || !userMessage || userMessage.role !== "user") {
+    const { messages }: { messages: Message[] } = body;
+    if (!isValidMessages(messages)) {
       return NextResponse.json(
         {
           error:
@@ -28,20 +30,20 @@ export async function POST(request: NextRequest) {
       );
     }
 
-    const agent = createWorkflow(messages, data);
-    // TODO: fix type in agent.run in LITS
-    const result = agent.run<AsyncGenerator<ChatResponseChunk>>(
-      userMessage.content,
-    ) as unknown as Promise<StopEvent<AsyncGenerator<ChatResponseChunk>>>;
-    // convert the workflow events to a vercel AI stream data object
-    const agentStreamData = await workflowEventsToStreamData(
-      agent.streamEvents(),
-    );
-    // convert the workflow result to a vercel AI content stream
-    const stream = toDataStream(result, {
-      onFinal: () => agentStreamData.close(),
+    const chatHistory = convertToChatHistory(messages);
+    const userMessageContent = retrieveMessageContent(messages);
+
+    const workflow = await createWorkflow({ chatHistory });
+
+    const context = workflow.run({
+      message: userMessageContent,
+      streaming: true,
     });
-    return new StreamingTextResponse(stream, {}, agentStreamData);
+    const { stream, dataStream } =
+      await createStreamFromWorkflowContext(context);
+
+    // Return the two streams in one response
+    return new StreamingTextResponse(stream, {}, dataStream);
   } catch (error) {
     console.error("[LlamaIndex]", error);
     return NextResponse.json(
diff --git a/templates/components/multiagent/typescript/workflow/single-agent.ts b/templates/components/multiagent/typescript/workflow/single-agent.ts
index c32ad82fd4a1f474a6c4fc917f1171b1570b175d..9e6acaa11ca1b7d0156e9cdb480d660bb207d29f 100644
--- a/templates/components/multiagent/typescript/workflow/single-agent.ts
+++ b/templates/components/multiagent/typescript/workflow/single-agent.ts
@@ -1,22 +1,21 @@
 import {
-  Context,
+  HandlerContext,
   StartEvent,
   StopEvent,
   Workflow,
   WorkflowEvent,
-} from "@llamaindex/core/workflow";
+} from "@llamaindex/workflow";
 import {
   BaseToolWithCall,
   ChatMemoryBuffer,
   ChatMessage,
-  ChatResponse,
   ChatResponseChunk,
+  QueryEngineTool,
   Settings,
   ToolCall,
   ToolCallLLM,
-  ToolCallLLMMessageOptions,
-  callTool,
 } from "llamaindex";
+import { callTools, chatWithTools } from "./tools";
 import { AgentInput, AgentRunEvent } from "./type";
 
 class InputEvent extends WorkflowEvent<{
@@ -27,11 +26,23 @@ class ToolCallEvent extends WorkflowEvent<{
   toolCalls: ToolCall[];
 }> {}
 
-export class FunctionCallingAgent extends Workflow {
+type FunctionCallingAgentContextData = {
+  streaming: boolean;
+};
+
+export type FunctionCallingAgentInput = AgentInput & {
+  displayName: string;
+};
+
+export class FunctionCallingAgent extends Workflow<
+  FunctionCallingAgentContextData,
+  FunctionCallingAgentInput,
+  string | AsyncGenerator<boolean | ChatResponseChunk<object>>
+> {
   name: string;
   llm: ToolCallLLM;
   memory: ChatMemoryBuffer;
-  tools: BaseToolWithCall[];
+  tools: BaseToolWithCall[] | QueryEngineTool[];
   systemPrompt?: string;
   writeEvents: boolean;
   role?: string;
@@ -53,7 +64,9 @@ export class FunctionCallingAgent extends Workflow {
     });
     this.name = options?.name;
     this.llm = options.llm ?? (Settings.llm as ToolCallLLM);
-    this.checkToolCallSupport();
+    if (!(this.llm instanceof ToolCallLLM)) {
+      throw new Error("LLM is not a ToolCallLLM");
+    }
     this.memory = new ChatMemoryBuffer({
       llm: this.llm,
       chatHistory: options.chatHistory,
@@ -64,175 +77,103 @@ export class FunctionCallingAgent extends Workflow {
     this.role = options?.role;
 
     // add steps
-    this.addStep(StartEvent<AgentInput>, this.prepareChatHistory, {
-      outputs: InputEvent,
-    });
-    this.addStep(InputEvent, this.handleLLMInput, {
-      outputs: [ToolCallEvent, StopEvent],
-    });
-    this.addStep(ToolCallEvent, this.handleToolCalls, {
-      outputs: InputEvent,
-    });
+    this.addStep(
+      {
+        inputs: [StartEvent<AgentInput>],
+        outputs: [InputEvent],
+      },
+      this.prepareChatHistory,
+    );
+    this.addStep(
+      {
+        inputs: [InputEvent],
+        outputs: [ToolCallEvent, StopEvent],
+      },
+      this.handleLLMInput,
+    );
+    this.addStep(
+      {
+        inputs: [ToolCallEvent],
+        outputs: [InputEvent],
+      },
+      this.handleToolCalls,
+    );
   }
 
   private get chatHistory() {
     return this.memory.getMessages();
   }
 
-  private async prepareChatHistory(
-    ctx: Context,
+  prepareChatHistory = async (
+    ctx: HandlerContext<FunctionCallingAgentContextData>,
     ev: StartEvent<AgentInput>,
-  ): Promise<InputEvent> {
-    const { message, streaming } = ev.data.input;
-    ctx.set("streaming", streaming);
+  ): Promise<InputEvent> => {
+    const { message, streaming } = ev.data;
+    ctx.data.streaming = streaming ?? false;
     this.writeEvent(`Start to work on: ${message}`, ctx);
     if (this.systemPrompt) {
       this.memory.put({ role: "system", content: this.systemPrompt });
     }
     this.memory.put({ role: "user", content: message });
     return new InputEvent({ input: this.chatHistory });
-  }
+  };
 
-  private async handleLLMInput(
-    ctx: Context,
+  handleLLMInput = async (
+    ctx: HandlerContext<FunctionCallingAgentContextData>,
     ev: InputEvent,
-  ): Promise<StopEvent<string | AsyncGenerator> | ToolCallEvent> {
-    if (ctx.get("streaming")) {
-      return await this.handleLLMInputStream(ctx, ev);
+  ): Promise<StopEvent<string | AsyncGenerator> | ToolCallEvent> => {
+    const toolCallResponse = await chatWithTools(
+      this.llm,
+      this.tools,
+      this.chatHistory,
+    );
+    if (toolCallResponse.toolCallMessage) {
+      this.memory.put(toolCallResponse.toolCallMessage);
     }
 
-    const result = await this.llm.chat({
-      messages: this.chatHistory,
-      tools: this.tools,
-    });
-    this.memory.put(result.message);
-
-    const toolCalls = this.getToolCallsFromResponse(result);
-    if (toolCalls.length) {
-      return new ToolCallEvent({ toolCalls });
+    if (toolCallResponse.hasToolCall()) {
+      return new ToolCallEvent({ toolCalls: toolCallResponse.toolCalls });
     }
-    this.writeEvent("Finished task", ctx);
-    return new StopEvent({ result: result.message.content.toString() });
-  }
-
-  private async handleLLMInputStream(
-    context: Context,
-    ev: InputEvent,
-  ): Promise<StopEvent<AsyncGenerator> | ToolCallEvent> {
-    const { llm, tools, memory } = this;
-    const llmArgs = { messages: this.chatHistory, tools };
-
-    const responseGenerator = async function* () {
-      const responseStream = await llm.chat({ ...llmArgs, stream: true });
-
-      let fullResponse = null;
-      let yieldedIndicator = false;
-      for await (const chunk of responseStream) {
-        const hasToolCalls = chunk.options && "toolCall" in chunk.options;
-        if (!hasToolCalls) {
-          if (!yieldedIndicator) {
-            yield false;
-            yieldedIndicator = true;
-          }
-          yield chunk;
-        } else if (!yieldedIndicator) {
-          yield true;
-          yieldedIndicator = true;
-        }
-
-        fullResponse = chunk;
-      }
 
-      if (fullResponse?.options && Object.keys(fullResponse.options).length) {
-        memory.put({
-          role: "assistant",
-          content: "",
-          options: fullResponse.options,
-        });
-        yield fullResponse;
+    if (ctx.data.streaming) {
+      if (!toolCallResponse.responseGenerator) {
+        throw new Error("No streaming response");
       }
-    };
-
-    const generator = responseGenerator();
-    const isToolCall = await generator.next();
-    if (isToolCall.value) {
-      const fullResponse = await generator.next();
-      const toolCalls = this.getToolCallsFromResponse(
-        fullResponse.value as ChatResponseChunk<ToolCallLLMMessageOptions>,
-      );
-      return new ToolCallEvent({ toolCalls });
+      return new StopEvent(toolCallResponse.responseGenerator);
     }
 
-    this.writeEvent("Finished task", context);
-    return new StopEvent({ result: generator });
-  }
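+    // Non-streaming path: drain the generator into a full message and stop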
+    const fullResponse = await toolCallResponse.asFullResponse();
+    this.memory.put(fullResponse);
+    return new StopEvent(fullResponse.content.toString());
+  };
 
-  private async handleToolCalls(
-    ctx: Context,
+  handleToolCalls = async (
+    ctx: HandlerContext<FunctionCallingAgentContextData>,
     ev: ToolCallEvent,
-  ): Promise<InputEvent> {
+  ): Promise<InputEvent> => {
     const { toolCalls } = ev.data;
 
-    const toolMsgs: ChatMessage[] = [];
-
-    for (const call of toolCalls) {
-      const targetTool = this.tools.find(
-        (tool) => tool.metadata.name === call.name,
-      );
-      // TODO: make logger optional in callTool in framework
-      const toolOutput = await callTool(targetTool, call, {
-        log: () => {},
-        error: (...args: unknown[]) => {
-          console.error(`[Tool ${call.name} Error]:`, ...args);
-        },
-        warn: () => {},
-      });
-      toolMsgs.push({
-        content: JSON.stringify(toolOutput.output),
-        role: "user",
-        options: {
-          toolResult: {
-            result: toolOutput.output,
-            isError: toolOutput.isError,
-            id: call.id,
-          },
-        },
-      });
-    }
+    const toolMsgs = await callTools({
+      tools: this.tools,
+      toolCalls,
+      ctx,
+      agentName: this.name,
+    });
 
     for (const msg of toolMsgs) {
       this.memory.put(msg);
     }
 
     return new InputEvent({ input: this.memory.getMessages() });
-  }
+  };
 
-  private writeEvent(msg: string, context: Context) {
+  writeEvent = (
+    msg: string,
+    ctx: HandlerContext<FunctionCallingAgentContextData>,
+  ) => {
     if (!this.writeEvents) return;
-    context.writeEventToStream({
-      data: new AgentRunEvent({ name: this.name, msg }),
-    });
-  }
-
-  private checkToolCallSupport() {
-    const { supportToolCall } = this.llm as ToolCallLLM;
-    if (!supportToolCall) throw new Error("LLM does not support tool calls");
-  }
-
-  private getToolCallsFromResponse(
-    response:
-      | ChatResponse<ToolCallLLMMessageOptions>
-      | ChatResponseChunk<ToolCallLLMMessageOptions>,
-  ): ToolCall[] {
-    let options;
-    if ("message" in response) {
-      options = response.message.options;
-    } else {
-      options = response.options;
-    }
-    if (options && "toolCall" in options) {
-      return options.toolCall as ToolCall[];
-    }
-    return [];
-  }
+    ctx.sendEvent(
+      new AgentRunEvent({ agent: this.name, text: msg, type: "text" }),
+    );
+  };
 }
diff --git a/templates/components/multiagent/typescript/workflow/stream.ts b/templates/components/multiagent/typescript/workflow/stream.ts
index 6502be55c6bf9cf90d44e72591ab931e28a52e56..cb41e984e3b6a282294f84f724254d726ca4e9a8 100644
--- a/templates/components/multiagent/typescript/workflow/stream.ts
+++ b/templates/components/multiagent/typescript/workflow/stream.ts
@@ -1,65 +1,77 @@
-import { StopEvent } from "@llamaindex/core/workflow";
 import {
-  createCallbacksTransformer,
-  createStreamDataTransformer,
+  StopEvent,
+  WorkflowContext,
+  WorkflowEvent,
+} from "@llamaindex/workflow";
+import {
   StreamData,
+  createStreamDataTransformer,
   trimStartOfStreamHelper,
-  type AIStreamCallbacksAndOptions,
 } from "ai";
 import { ChatResponseChunk } from "llamaindex";
 import { AgentRunEvent } from "./type";
 
-export function toDataStream(
-  result: Promise<StopEvent<AsyncGenerator<ChatResponseChunk>>>,
-  callbacks?: AIStreamCallbacksAndOptions,
-) {
-  return toReadableStream(result)
-    .pipeThrough(createCallbacksTransformer(callbacks))
-    .pipeThrough(createStreamDataTransformer());
-}
-
-function toReadableStream(
-  result: Promise<StopEvent<AsyncGenerator<ChatResponseChunk>>>,
-) {
+export async function createStreamFromWorkflowContext<Input, Output, Context>(
+  context: WorkflowContext<Input, Output, Context>,
+): Promise<{ stream: ReadableStream<string>; dataStream: StreamData }> {
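+  // Two streams are returned: `stream` carries the LLM text deltas and
+  // `dataStream` carries agent events as Vercel AI message annotations.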
   const trimStartOfStream = trimStartOfStreamHelper();
-  return new ReadableStream<string>({
-    start(controller) {
-      controller.enqueue(""); // Kickstart the stream
+  const dataStream = new StreamData();
+  const encoder = new TextEncoder();
+  let generator: AsyncGenerator<ChatResponseChunk> | undefined;
+  // Reuse one iterator over the workflow context instead of creating a new one per pull
+  const iterator = context[Symbol.asyncIterator]();
+
+  const closeStreams = (controller: ReadableStreamDefaultController) => {
+    controller.close();
+    dataStream.close();
+  };
+
+  const mainStream = new ReadableStream({
+    async start(controller) {
+      // Kickstart the stream by sending an empty string
+      controller.enqueue(encoder.encode(""));
     },
-    async pull(controller): Promise<void> {
-      const stopEvent = await result;
-      const generator = stopEvent.data.result;
-      const { value, done } = await generator.next();
+    async pull(controller) {
+      while (!generator) {
+        // get the next event from the workflow context
+        const { value: event, done } = await iterator.next();
+        if (done) {
+          closeStreams(controller);
+          return;
+        }
+        generator = handleEvent(event, dataStream);
+      }
+
+      const { value: chunk, done } = await generator.next();
       if (done) {
-        controller.close();
+        closeStreams(controller);
         return;
       }
-
-      const text = trimStartOfStream(value.delta ?? "");
-      if (text) controller.enqueue(text);
+      const text = trimStartOfStream(chunk.delta ?? "");
+      if (text) {
+        controller.enqueue(encoder.encode(text));
+      }
     },
   });
-}
 
-export async function workflowEventsToStreamData(
-  events: AsyncIterable<AgentRunEvent>,
-): Promise<StreamData> {
-  const streamData = new StreamData();
-
-  (async () => {
-    for await (const event of events) {
-      if (event instanceof AgentRunEvent) {
-        const { name, msg } = event.data;
-        if ((streamData as any).isClosed) {
-          break;
-        }
-        streamData.appendMessageAnnotation({
-          type: "agent",
-          data: { agent: name, text: msg },
-        });
-      }
-    }
-  })();
+  return {
+    stream: mainStream.pipeThrough(createStreamDataTransformer()),
+    dataStream,
+  };
+}
 
-  return streamData;
+function handleEvent(
+  event: WorkflowEvent<any>,
+  dataStream: StreamData,
+): AsyncGenerator<ChatResponseChunk> | undefined {
+  // Handle for StopEvent
+  if (event instanceof StopEvent) {
+    return event.data as AsyncGenerator<ChatResponseChunk>;
+  }
+  // Handle for AgentRunEvent
+  if (event instanceof AgentRunEvent) {
+    dataStream.appendMessageAnnotation({
+      type: "agent",
+      data: event.data,
+    });
+  }
 }
diff --git a/templates/components/multiagent/typescript/workflow/tools.ts b/templates/components/multiagent/typescript/workflow/tools.ts
new file mode 100644
index 0000000000000000000000000000000000000000..f5e94fbaeea4a2ef0677905eb063f957d8e12123
--- /dev/null
+++ b/templates/components/multiagent/typescript/workflow/tools.ts
@@ -0,0 +1,342 @@
+import { HandlerContext } from "@llamaindex/workflow";
+import {
+  BaseToolWithCall,
+  callTool,
+  ChatMessage,
+  ChatResponse,
+  ChatResponseChunk,
+  LlamaCloudIndex,
+  PartialToolCall,
+  QueryEngineTool,
+  ToolCall,
+  ToolCallLLM,
+  ToolCallLLMMessageOptions,
+} from "llamaindex";
+import crypto from "node:crypto";
+import { getDataSource } from "../engine";
+import { AgentRunEvent } from "./type";
+
+export const getQueryEngineTools = async (): Promise<
+  QueryEngineTool[] | null
+> => {
+  const topK = process.env.TOP_K ? parseInt(process.env.TOP_K) : undefined;
+
+  const index = await getDataSource();
+  if (!index) {
+    return null;
+  }
+  // If the index is a LlamaCloudIndex, provide two query engine tools
+  if (index instanceof LlamaCloudIndex) {
+    return [
+      new QueryEngineTool({
+        queryEngine: index.asQueryEngine({
+          similarityTopK: topK,
+          retrieval_mode: "files_via_content",
+        }),
+        metadata: {
+          name: "document_retriever",
+          description: `Document retriever that retrieves entire documents from the corpus.
+  ONLY use for research questions that may require searching over entire research reports.
+  Will be slower and more expensive than chunk-level retrieval but may be necessary.`,
+        },
+      }),
+      new QueryEngineTool({
+        queryEngine: index.asQueryEngine({
+          similarityTopK: topK,
+          retrieval_mode: "chunks",
+        }),
+        metadata: {
+          name: "chunk_retriever",
+          description: `Retrieves a small set of relevant document chunks from the corpus.
+      Use for research questions that look up specific facts from the knowledge corpus
+      and don't need entire documents.`,
+        },
+      }),
+    ];
+  } else {
+    return [
+      new QueryEngineTool({
+        queryEngine: index.asQueryEngine({
+          similarityTopK: topK,
+        }),
+        metadata: {
+          name: "retriever",
+          description: `Use this tool to retrieve information about the text corpus from the index.`,
+        },
+      }),
+    ];
+  }
+};
+
+/**
+ * Call multiple tools and return the tool messages
+ */
+export const callTools = async <T>({
+  tools,
+  toolCalls,
+  ctx,
+  agentName,
+  writeEvent = true,
+}: {
+  toolCalls: ToolCall[];
+  tools: BaseToolWithCall[];
+  ctx: HandlerContext<T>;
+  agentName: string;
+  writeEvent?: boolean;
+}): Promise<ChatMessage[]> => {
+  const toolMsgs: ChatMessage[] = [];
+  if (toolCalls.length === 0) {
+    return toolMsgs;
+  }
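+  // Single tool call: run it directly, emitting a plain text event instead of progress updates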
+  if (toolCalls.length === 1) {
+    const tool = tools.find((tool) => tool.metadata.name === toolCalls[0].name);
+    if (!tool) {
+      throw new Error(`Tool ${toolCalls[0].name} not found`);
+    }
+    return [
+      await callSingleTool(
+        tool,
+        toolCalls[0],
+        writeEvent
+          ? (msg: string) => {
+              ctx.sendEvent(
+                new AgentRunEvent({
+                  agent: agentName,
+                  text: msg,
+                  type: "text",
+                }),
+              );
+            }
+          : undefined,
+      ),
+    ];
+  }
+  // Multiple tool calls, show events in progress
+  const progressId = crypto.randomUUID();
+  const totalSteps = toolCalls.length;
+  let currentStep = 0;
+  for (const toolCall of toolCalls) {
+    const tool = tools.find((tool) => tool.metadata.name === toolCall.name);
+    if (!tool) {
+      throw new Error(`Tool ${toolCall.name} not found`);
+    }
+    const toolMsg = await callSingleTool(tool, toolCall, (msg: string) => {
+      ctx.sendEvent(
+        new AgentRunEvent({
+          agent: agentName,
+          text: msg,
+          type: "progress",
+          data: {
+            id: progressId,
+            total: totalSteps,
+            current: currentStep,
+          },
+        }),
+      );
+      currentStep++;
+    });
+    toolMsgs.push(toolMsg);
+  }
+  return toolMsgs;
+};
+
+export const callSingleTool = async (
+  tool: BaseToolWithCall,
+  toolCall: ToolCall,
+  eventEmitter?: (msg: string) => void,
+): Promise<ChatMessage> => {
+  if (eventEmitter) {
+    eventEmitter(
+      `Calling tool ${toolCall.name} with input: ${JSON.stringify(toolCall.input)}`,
+    );
+  }
+
+  const toolOutput = await callTool(tool, toolCall, {
+    log: () => {},
+    error: (...args: unknown[]) => {
+      console.error(`Tool ${toolCall.name} failed:`, ...args);
+      if (eventEmitter) {
+        eventEmitter(`Tool ${toolCall.name} failed: ${args.join(" ")}`);
+      }
+      return {
+        content: JSON.stringify({
+          error: args.join(" "),
+        }),
+        role: "user",
+        options: {
+          toolResult: {
+            id: toolCall.id,
+            result: JSON.stringify({
+              error: args.join(" "),
+            }),
+            isError: true,
+          },
+        },
+      };
+    },
+    warn: () => {},
+  });
+
+  return {
+    content: JSON.stringify(toolOutput.output),
+    role: "user",
+    options: {
+      toolResult: {
+        result: toolOutput.output,
+        isError: toolOutput.isError,
+        id: toolCall.id,
+      },
+    },
+  };
+};
+
+class ChatWithToolsResponse {
+  toolCalls: ToolCall[];
+  toolCallMessage?: ChatMessage;
+  responseGenerator?: AsyncGenerator<ChatResponseChunk>;
+
+  constructor(options: {
+    toolCalls: ToolCall[];
+    toolCallMessage?: ChatMessage;
+    responseGenerator?: AsyncGenerator<ChatResponseChunk>;
+  }) {
+    this.toolCalls = options.toolCalls;
+    this.toolCallMessage = options.toolCallMessage;
+    this.responseGenerator = options.responseGenerator;
+  }
+
+  hasMultipleTools() {
+    const uniqueToolNames = new Set(this.getToolNames());
+    return uniqueToolNames.size > 1;
+  }
+
+  hasToolCall() {
+    return this.toolCalls.length > 0;
+  }
+
+  getToolNames() {
+    return this.toolCalls.map((toolCall) => toolCall.name);
+  }
+
+  async asFullResponse(): Promise<ChatMessage> {
+    if (!this.responseGenerator) {
+      throw new Error("No response generator");
+    }
+    let fullResponse = "";
+    for await (const chunk of this.responseGenerator) {
+      fullResponse += chunk.delta;
+    }
+    return {
+      role: "assistant",
+      content: fullResponse,
+    };
+  }
+}
+
+export const chatWithTools = async (
+  llm: ToolCallLLM,
+  tools: BaseToolWithCall[],
+  messages: ChatMessage[],
+): Promise<ChatWithToolsResponse> => {
+  const responseGenerator = async function* (): AsyncGenerator<
+    boolean | ChatResponseChunk,
+    void,
+    unknown
+  > {
+    const responseStream = await llm.chat({ messages, tools, stream: true });
+
+    let fullResponse = null;
+    let yieldedIndicator = false;
+    const toolCallMap = new Map<string, PartialToolCall>();
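+    // Protocol: the first yielded value is a boolean indicating whether the
+    // LLM is calling tools (true) or streaming plain text (false); the
+    // remaining yields are the response chunks themselves.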
+    for await (const chunk of responseStream) {
+      const hasToolCalls = chunk.options && "toolCall" in chunk.options;
+      if (!hasToolCalls) {
+        if (!yieldedIndicator) {
+          yield false;
+          yieldedIndicator = true;
+        }
+        yield chunk;
+      } else if (!yieldedIndicator) {
+        yield true;
+        yieldedIndicator = true;
+      }
+
+      if (chunk.options && "toolCall" in chunk.options) {
+        for (const toolCall of chunk.options.toolCall as PartialToolCall[]) {
+          if (toolCall.id) {
+            toolCallMap.set(toolCall.id, toolCall);
+          }
+        }
+      }
+
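+      // finish_reason is OpenAI-specific; for providers without it, this
+      // check passes and fullResponse is refreshed on every tool-call chunk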
+      if (
+        hasToolCalls &&
+        (chunk.raw as any)?.choices?.[0]?.finish_reason !== null
+      ) {
+        // Update the fullResponse with the tool calls
+        const toolCalls = Array.from(toolCallMap.values());
+        fullResponse = {
+          ...chunk,
+          options: {
+            ...chunk.options,
+            toolCall: toolCalls,
+          },
+        };
+      }
+    }
+
+    if (fullResponse) {
+      yield fullResponse;
+    }
+  };
+
+  const generator = responseGenerator();
+  const isToolCall = await generator.next();
+
+  if (isToolCall.value) {
+    // If it's a tool call, we need to wait for the full response
+    let fullResponse = null;
+    for await (const chunk of generator) {
+      fullResponse = chunk;
+    }
+
+    if (fullResponse) {
+      const responseChunk = fullResponse as ChatResponseChunk;
+      const toolCalls = getToolCallsFromResponse(responseChunk);
+      return new ChatWithToolsResponse({
+        toolCalls,
+        toolCallMessage: {
+          options: responseChunk.options,
+          role: "assistant",
+          content: "",
+        },
+      });
+    } else {
+      throw new Error("Cannot get tool calls from response");
+    }
+  }
+
+  return new ChatWithToolsResponse({
+    toolCalls: [],
+    responseGenerator: generator as AsyncGenerator<ChatResponseChunk>,
+  });
+};
+
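+// Tool calls travel in the message options, both on full chat responses and
+// on streamed chunks.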
+export const getToolCallsFromResponse = (
+  response:
+    | ChatResponse<ToolCallLLMMessageOptions>
+    | ChatResponseChunk<ToolCallLLMMessageOptions>,
+): ToolCall[] => {
+  let options;
+
+  if ("message" in response) {
+    options = response.message.options;
+  } else {
+    options = response.options;
+  }
+
+  if (options && "toolCall" in options) {
+    return options.toolCall as ToolCall[];
+  }
+  return [];
+};
diff --git a/templates/components/multiagent/typescript/workflow/type.ts b/templates/components/multiagent/typescript/workflow/type.ts
index 2e2fdedbd03088bc6fc51298be304df4d0bd5e9e..541b1ba9e6357d3d24ff05a2a6cc37600694506c 100644
--- a/templates/components/multiagent/typescript/workflow/type.ts
+++ b/templates/components/multiagent/typescript/workflow/type.ts
@@ -1,11 +1,24 @@
-import { WorkflowEvent } from "@llamaindex/core/workflow";
+import { WorkflowEvent } from "@llamaindex/workflow";
+import { MessageContent } from "llamaindex";
 
 export type AgentInput = {
-  message: string;
+  message: MessageContent;
   streaming?: boolean;
 };
 
+export type AgentRunEventType = "text" | "progress";
+
+export type ProgressEventData = {
+  id: string;
+  total: number;
+  current: number;
+};
+
+export type AgentRunEventData = ProgressEventData;
+
 export class AgentRunEvent extends WorkflowEvent<{
-  name: string;
-  msg: string;
+  agent: string;
+  text: string;
+  type: AgentRunEventType;
+  data?: AgentRunEventData;
 }> {}
diff --git a/templates/types/streaming/express/package.json b/templates/types/streaming/express/package.json
index 11747f1f142341d4424f4c44644fd1ede713a48a..d39e2cf32445c4ba8140494cc20c5ecf862e3def 100644
--- a/templates/types/streaming/express/package.json
+++ b/templates/types/streaming/express/package.json
@@ -27,12 +27,15 @@
     "got": "^14.4.1",
     "@apidevtools/swagger-parser": "^10.1.0",
     "formdata-node": "^6.0.3",
-    "marked": "^14.1.2"
+    "marked": "^14.1.2",
+    "papaparse": "^5.4.1"
   },
   "devDependencies": {
     "@types/cors": "^2.8.16",
     "@types/express": "^4.17.21",
     "@types/node": "^20.9.5",
+    "@llamaindex/workflow": "^0.0.3",
+    "@types/papaparse": "^5.3.15",
     "concurrently": "^8.2.2",
     "eslint": "^8.54.0",
     "eslint-config-prettier": "^8.10.0",
diff --git a/templates/types/streaming/nextjs/package.json b/templates/types/streaming/nextjs/package.json
index 195330677b3be56418d11460e2e1c9a66681e01e..6e4c036a272ded33d8a9d3d09d66744c2954469a 100644
--- a/templates/types/streaming/nextjs/package.json
+++ b/templates/types/streaming/nextjs/package.json
@@ -18,6 +18,7 @@
     "@radix-ui/react-select": "^2.1.1",
     "@radix-ui/react-slot": "^1.0.2",
     "@radix-ui/react-tabs": "^1.1.0",
+    "@llamaindex/chat-ui": "0.0.5",
     "ai": "3.3.42",
     "ajv": "^8.12.0",
     "class-variance-authority": "^0.7.0",
@@ -29,6 +30,7 @@
     "llamaindex": "0.8.2",
     "lucide-react": "^0.294.0",
     "next": "^14.2.4",
+    "papaparse": "^5.4.1",
     "react": "^18.2.0",
     "react-dom": "^18.2.0",
     "supports-color": "^8.1.1",
@@ -36,14 +38,15 @@
     "tiktoken": "^1.0.15",
     "uuid": "^9.0.1",
     "vaul": "^0.9.1",
-    "marked": "^14.1.2",
-    "@llamaindex/chat-ui": "0.0.5"
+    "marked": "^14.1.2"
   },
   "devDependencies": {
     "@types/node": "^20.10.3",
     "@types/react": "^18.2.42",
     "@types/react-dom": "^18.2.17",
     "@types/uuid": "^9.0.8",
+    "@llamaindex/workflow": "^0.0.3",
+    "@types/papaparse": "^5.3.15",
     "autoprefixer": "^10.4.16",
     "cross-env": "^7.0.3",
     "eslint": "^8.55.0",