From 40ee7610b278a47f055549c941d7d65d6bde6ee8 Mon Sep 17 00:00:00 2001
From: Marcus Schiesser <mail@marcusschiesser.de>
Date: Tue, 11 Mar 2025 11:06:21 +0700
Subject: [PATCH] feat: add asQueryTool to index and add factory methods for
 simplifying agent usage (#1715)

---
 .changeset/sour-rats-complain.md              |   7 +
 .changeset/wise-ghosts-play.md                |   5 +
 apps/next/src/app/(home)/page.tsx             |  19 +--
 .../llamaindex/modules/agent_workflow.mdx     |  38 ++---
 .../nextjs-node-runtime/src/actions/openai.ts |  21 +--
 examples/agent/large_toolcall.ts              |   4 +-
 examples/agent/openai.ts                      |   4 +-
 examples/agent/wiki.ts                        |   4 +-
 .../{blog_writer.ts => blog-writer.ts}        |  10 +-
 ...{multiple_agents.ts => multiple-agents.ts} |  10 +-
 .../{single_agent.ts => single-agent.ts}      |   8 +-
 examples/agentworkflow/with-anthropic.ts      |  10 +-
 packages/core/src/utils/index.ts              |   3 +-
 packages/core/src/utils/llms.ts               |  88 ------------
 packages/core/src/utils/mock.ts               |  93 ++++++++++++
 packages/llamaindex/src/indices/BaseIndex.ts  |  69 +++++++++
 .../llamaindex/src/tools/QueryEngineTool.ts   |   2 +-
 packages/workflow/package.json                |   6 +-
 packages/workflow/src/agent/agent-workflow.ts | 132 +++++++++++++-----
 packages/workflow/src/agent/function-agent.ts |  60 +++++---
 packages/workflow/src/agent/index.ts          |  22 +--
 .../workflow/test}/agent-workflow.test.ts     | 125 ++---------------
 .../workflow/test}/function-agent.test.ts     |  15 +-
 packages/workflow/test/mock.ts                | 116 +++++++++++++++
 .../workflow/test}/workflow.test.ts           |   4 +-
 pnpm-lock.yaml                                |  39 +++---
 26 files changed, 529 insertions(+), 385 deletions(-)
 create mode 100644 .changeset/sour-rats-complain.md
 create mode 100644 .changeset/wise-ghosts-play.md
 rename examples/agentworkflow/{blog_writer.ts => blog-writer.ts} (93%)
 rename examples/agentworkflow/{multiple_agents.ts => multiple-agents.ts} (94%)
 rename examples/agentworkflow/{single_agent.ts => single-agent.ts} (87%)
 create mode 100644 packages/core/src/utils/mock.ts
 rename {unit/workflow => packages/workflow/test}/agent-workflow.test.ts (53%)
 rename {unit/workflow => packages/workflow/test}/function-agent.test.ts (85%)
 create mode 100644 packages/workflow/test/mock.ts
 rename {unit/workflow => packages/workflow/test}/workflow.test.ts (99%)

diff --git a/.changeset/sour-rats-complain.md b/.changeset/sour-rats-complain.md
new file mode 100644
index 000000000..86597ed25
--- /dev/null
+++ b/.changeset/sour-rats-complain.md
@@ -0,0 +1,7 @@
+---
+"llamaindex": patch
+"@llamaindex/workflow": patch
+"@llamaindex/core": patch
+---
+
+Add factory methods agent and multiAgent to simplify agent usage
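
To illustrate the two factory methods this changeset introduces, here is a minimal sketch (the echo tool and the model name are assumptions for illustration, not part of this patch):

```ts
import { OpenAI } from "@llamaindex/openai";
import { agent, FunctionTool, multiAgent } from "llamaindex";
import { z } from "zod";

// A trivial tool, assumed for illustration only
const echoTool = FunctionTool.from(({ text }: { text: string }) => text, {
  name: "echo",
  description: "Echoes the input text",
  parameters: z.object({ text: z.string() }),
});

// agent() replaces AgentWorkflow.fromTools() for the single-agent case
const echoAgent = agent({
  tools: [echoTool],
  llm: new OpenAI({ model: "gpt-4o-mini" }),
});

// multiAgent() replaces new AgentWorkflow() for the multi-agent case;
// per this patch it also accepts AgentWorkflow instances directly
const workflow = multiAgent({
  agents: [echoAgent],
  rootAgent: echoAgent,
});

const result = await workflow.run("Echo 'hello'");
```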
diff --git a/.changeset/wise-ghosts-play.md b/.changeset/wise-ghosts-play.md
new file mode 100644
index 000000000..79260d155
--- /dev/null
+++ b/.changeset/wise-ghosts-play.md
@@ -0,0 +1,5 @@
+---
+"llamaindex": patch
+---
+
+feat: add asQueryTool to index
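
A short sketch of the new index method (assuming an existing index, e.g. from `VectorStoreIndex.fromDocuments`; the `similarityTopK` value is arbitrary):

```ts
// Build a query tool directly from an index instead of wiring up a
// retriever, query engine, and QueryEngineTool by hand
const tool = index.asQueryTool({
  options: { similarityTopK: 5 },
});
```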
diff --git a/apps/next/src/app/(home)/page.tsx b/apps/next/src/app/(home)/page.tsx
index f94edc4d5..d3492ab6c 100644
--- a/apps/next/src/app/(home)/page.tsx
+++ b/apps/next/src/app/(home)/page.tsx
@@ -125,19 +125,20 @@ const response = await agent.chat({
           description="Truly powerful retrieval-augmented generation applications use agentic techniques, and LlamaIndex.TS makes it easy to build them."
         >
           <CodeBlock
-            code={`import { FunctionTool } from "llamaindex";
-import { OpenAIAgent } from "@llamaindex/openai";
+            code={`import { agent } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
 
-const interpreterTool = FunctionTool.from(...);
-const systemPrompt = \`...\`;
+// create a query tool from a previously created LlamaIndex index
+const queryTool = index.queryTool();
 
-const agent = new OpenAIAgent({
-  llm,
-  tools: [interpreterTool],
-  systemPrompt,
+const ragAgent = agent({
+  llm: new OpenAI({
+    model: "gpt-4o",
+  }),
+  tools: [queryTool],
 });
 
-await agent.chat('...');`}
+await ragAgent.run('...');`}
             lang="ts"
           />
         </Feature>
diff --git a/apps/next/src/content/docs/llamaindex/modules/agent_workflow.mdx b/apps/next/src/content/docs/llamaindex/modules/agent_workflow.mdx
index 52cce3965..3d994dcc1 100644
--- a/apps/next/src/content/docs/llamaindex/modules/agent_workflow.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/agent_workflow.mdx
@@ -6,25 +6,7 @@ import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
 import CodeSource from "!raw-loader!../../../../../../../examples/agentworkflow/blog_writer.ts";
 import { Tab, Tabs } from "fumadocs-ui/components/tabs";
 
-`AgentWorkflow` is a powerful system that enables you to create and orchestrate one or multiple agents with tools to perform specific tasks. It's built on top of the base `Workflow` system and provides a streamlined interface for agent interactions.
-
-## Installation
-
-You'll need to install the `@llamaindex/workflow` package:
-
-<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
-	```shell tab="npm"
-	npm install @llamaindex/workflow
-	```
-
-	```shell tab="yarn"
-	yarn add @llamaindex/workflow
-	```
-
-	```shell tab="pnpm"
-	pnpm add @llamaindex/workflow
-	```
-</Tabs>
+Agent Workflows are a powerful way to create and orchestrate one or multiple agents with tools to perform specific tasks. They're built on top of the base `Workflow` system and provide a streamlined interface for agent interactions.
 
 ## Usage
 
@@ -33,7 +15,7 @@ You'll need to install the `@llamaindex/workflow` package:
 The simplest use case is creating a single agent with specific tools. Here's an example of creating an assistant that tells jokes:
 
 ```typescript
-import { AgentWorkflow, FunctionTool } from "llamaindex";
+import { agent, FunctionTool } from "llamaindex";
 import { OpenAI } from "@llamaindex/openai";
 
 // Define a joke-telling tool
@@ -45,8 +27,8 @@ const jokeTool = FunctionTool.from(
   }
 );
 
-// Create an agent workflow with the tool
-const workflow = AgentWorkflow.fromTools({
+// Create a single agent workflow with the tool
+const workflow = agent({
   tools: [jokeTool],
   llm: new OpenAI({
     model: "gpt-4o-mini",
@@ -60,7 +42,7 @@ console.log(result); // Baby Llama is called cria
 
 ### Event Streaming
 
-`AgentWorkflow` provides a unified interface for event streaming, making it easy to track and respond to different events during execution:
+Agent Workflows provide a unified interface for event streaming, making it easy to track and respond to different events during execution:
 
 ```typescript
 import { AgentToolCall, AgentStream } from "llamaindex";
@@ -81,7 +63,7 @@ for await (const event of context) {
 
 ### Multi-Agent Workflow
 
-`AgentWorkflow` can orchestrate multiple agents, enabling complex interactions and task handoffs. Each agent in a multi-agent workflow requires:
+An Agent Workflow can orchestrate multiple agents, enabling complex interactions and task handoffs. Each agent in a multi-agent workflow requires:
 
 - `name`: Unique identifier for the agent
 - `description`: Purpose description used for task routing
@@ -91,12 +73,12 @@ for await (const event of context) {
 Here's an example of a multi-agent system that combines joke-telling and weather information:
 
 ```typescript
-import { AgentWorkflow, FunctionAgent, FunctionTool } from "llamaindex";
+import { multiAgent, agent, FunctionTool } from "llamaindex";
 import { OpenAI } from "@llamaindex/openai";
 import { z } from "zod";
 
 // Create a weather agent
-const weatherAgent = new FunctionAgent({
+const weatherAgent = agent({
   name: "WeatherAgent",
   description: "Provides weather information for any city",
   tools: [
@@ -115,7 +97,7 @@ const weatherAgent = new FunctionAgent({
 });
 
 // Create a joke-telling agent
-const jokeAgent = new FunctionAgent({
+const jokeAgent = agent({
   name: "JokeAgent",
   description: "Tells jokes and funny stories",
   tools: [jokeTool], // Using the joke tool defined earlier
@@ -124,7 +106,7 @@ const jokeAgent = new FunctionAgent({
 });
 
 // Create the multi-agent workflow
-const workflow = new AgentWorkflow({
+const workflow = multiAgent({
   agents: [jokeAgent, weatherAgent],
   rootAgent: jokeAgent, // Start with the joke agent
 });
diff --git a/e2e/examples/nextjs-node-runtime/src/actions/openai.ts b/e2e/examples/nextjs-node-runtime/src/actions/openai.ts
index 65464435f..230452abc 100644
--- a/e2e/examples/nextjs-node-runtime/src/actions/openai.ts
+++ b/e2e/examples/nextjs-node-runtime/src/actions/openai.ts
@@ -1,13 +1,7 @@
 "use server";
 import { HuggingFaceEmbedding } from "@llamaindex/huggingface";
 import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
-import {
-  OpenAI,
-  OpenAIAgent,
-  QueryEngineTool,
-  Settings,
-  VectorStoreIndex,
-} from "llamaindex";
+import { OpenAI, OpenAIAgent, Settings, VectorStoreIndex } from "llamaindex";
 
 Settings.llm = new OpenAI({
   apiKey: process.env.NEXT_PUBLIC_OPENAI_KEY ?? "FAKE_KEY_TO_PASS_TESTS",
@@ -31,23 +25,20 @@ export async function getOpenAIModelRequest(query: string) {
     const reader = new SimpleDirectoryReader();
     const documents = await reader.loadData(currentDir);
     const index = await VectorStoreIndex.fromDocuments(documents);
-    const retriever = index.asRetriever({
-      similarityTopK: 10,
-    });
-    const queryEngine = index.asQueryEngine({
-      retriever,
-    });
 
     // define the query engine as a tool
     const tools = [
-      new QueryEngineTool({
-        queryEngine: queryEngine,
+      index.queryTool({
+        options: {
+          similarityTopK: 10,
+        },
         metadata: {
           name: "deployment_details_per_env",
           description: `This tool can answer detailed questions about deployments happened in various environments.`,
         },
       }),
     ];
+
     // create the agent
     const agent = new OpenAIAgent({ tools });
 
diff --git a/examples/agent/large_toolcall.ts b/examples/agent/large_toolcall.ts
index a31c731e0..065adc00e 100644
--- a/examples/agent/large_toolcall.ts
+++ b/examples/agent/large_toolcall.ts
@@ -1,5 +1,5 @@
 import { OpenAI } from "@llamaindex/openai";
-import { AgentWorkflow, FunctionTool } from "llamaindex";
+import { FunctionTool, agent } from "llamaindex";
 import { z } from "zod";
 
 const csvData =
@@ -33,7 +33,7 @@ const userQuestion = "which are the best comedies after 2010?";
   const systemPrompt =
     "You are a Python interpreter.\n        - You are given tasks to complete and you run python code to solve them.\n        - The python code runs in a Jupyter notebook. Every time you call $(interpreter) tool, the python code is executed in a separate cell. It's okay to make multiple calls to $(interpreter).\n        - Display visualizations using matplotlib or any other visualization library directly in the notebook. Shouldn't save the visualizations to a file, just return the base64 encoded data.\n        - You can install any pip package (if it exists) if you need to but the usual packages for data analysis are already preinstalled.\n        - You can run any python code you want in a secure environment.";
 
-  const workflow = AgentWorkflow.fromTools({
+  const workflow = agent({
     tools: [interpreterTool],
     llm,
     verbose: false,
diff --git a/examples/agent/openai.ts b/examples/agent/openai.ts
index a1da4ac8d..e450452d2 100644
--- a/examples/agent/openai.ts
+++ b/examples/agent/openai.ts
@@ -1,5 +1,5 @@
 import { OpenAI } from "@llamaindex/openai";
-import { AgentWorkflow, FunctionTool } from "llamaindex";
+import { FunctionTool, agent } from "llamaindex";
 import { z } from "zod";
 
 const sumNumbers = FunctionTool.from(
@@ -27,7 +27,7 @@ const divideNumbers = FunctionTool.from(
 );
 
 async function main() {
-  const workflow = AgentWorkflow.fromTools({
+  const workflow = agent({
     tools: [sumNumbers, divideNumbers],
     llm: new OpenAI({ model: "gpt-4o-mini" }),
     verbose: false,
diff --git a/examples/agent/wiki.ts b/examples/agent/wiki.ts
index 4e3c29a23..70d1cc1d9 100644
--- a/examples/agent/wiki.ts
+++ b/examples/agent/wiki.ts
@@ -1,12 +1,12 @@
 import { OpenAI } from "@llamaindex/openai";
-import { AgentStream, AgentWorkflow } from "llamaindex";
+import { AgentStream, agent } from "llamaindex";
 import { WikipediaTool } from "../wiki";
 
 async function main() {
   const llm = new OpenAI({ model: "gpt-4-turbo" });
   const wikiTool = new WikipediaTool();
 
-  const workflow = AgentWorkflow.fromTools({
+  const workflow = agent({
     tools: [wikiTool],
     llm,
     verbose: false,
diff --git a/examples/agentworkflow/blog_writer.ts b/examples/agentworkflow/blog-writer.ts
similarity index 93%
rename from examples/agentworkflow/blog_writer.ts
rename to examples/agentworkflow/blog-writer.ts
index 7fae09145..a8bb64830 100644
--- a/examples/agentworkflow/blog_writer.ts
+++ b/examples/agentworkflow/blog-writer.ts
@@ -1,11 +1,11 @@
 import { OpenAI } from "@llamaindex/openai";
 import fs from "fs";
 import {
+  agent,
   AgentToolCall,
   AgentToolCallResult,
-  AgentWorkflow,
-  FunctionAgent,
   FunctionTool,
+  multiAgent,
 } from "llamaindex";
 import os from "os";
 import { z } from "zod";
@@ -34,7 +34,7 @@ const saveFileTool = FunctionTool.from(
 );
 
 async function main() {
-  const reportAgent = new FunctionAgent({
+  const reportAgent = agent({
     name: "ReportAgent",
     description:
       "Responsible for crafting well-written blog posts based on research findings",
@@ -43,7 +43,7 @@ async function main() {
     llm,
   });
 
-  const researchAgent = new FunctionAgent({
+  const researchAgent = agent({
     name: "ResearchAgent",
     description:
       "Responsible for gathering relevant information from the internet",
@@ -53,7 +53,7 @@ async function main() {
     llm,
   });
 
-  const workflow = new AgentWorkflow({
+  const workflow = multiAgent({
     agents: [researchAgent, reportAgent],
     rootAgent: researchAgent,
   });
diff --git a/examples/agentworkflow/multiple_agents.ts b/examples/agentworkflow/multiple-agents.ts
similarity index 94%
rename from examples/agentworkflow/multiple_agents.ts
rename to examples/agentworkflow/multiple-agents.ts
index 1340ebc98..4f74a65b4 100644
--- a/examples/agentworkflow/multiple_agents.ts
+++ b/examples/agentworkflow/multiple-agents.ts
@@ -5,14 +5,14 @@
  */
 import { OpenAI } from "@llamaindex/openai";
 import {
+  agent,
   AgentInput,
   AgentOutput,
   AgentStream,
   AgentToolCall,
   AgentToolCallResult,
-  AgentWorkflow,
-  FunctionAgent,
   FunctionTool,
+  multiAgent,
   StopEvent,
 } from "llamaindex";
 import { z } from "zod";
@@ -55,7 +55,7 @@ const temperatureFetcherTool = FunctionTool.from(
 
 // Create agents
 async function multiWeatherAgent() {
-  const converterAgent = new FunctionAgent({
+  const converterAgent = agent({
     name: "TemperatureConverterAgent",
     description:
       "An agent that can convert temperatures from Fahrenheit to Celsius.",
@@ -63,7 +63,7 @@ async function multiWeatherAgent() {
     llm,
   });
 
-  const weatherAgent = new FunctionAgent({
+  const weatherAgent = agent({
     name: "FetchWeatherAgent",
     description: "An agent that can get the weather in a city. ",
     systemPrompt:
@@ -76,7 +76,7 @@ async function multiWeatherAgent() {
   });
 
   // Create agent workflow with the agents
-  const workflow = new AgentWorkflow({
+  const workflow = multiAgent({
     agents: [weatherAgent, converterAgent],
     rootAgent: weatherAgent,
     verbose: false,
diff --git a/examples/agentworkflow/single_agent.ts b/examples/agentworkflow/single-agent.ts
similarity index 87%
rename from examples/agentworkflow/single_agent.ts
rename to examples/agentworkflow/single-agent.ts
index 9a849ab52..dc12d877c 100644
--- a/examples/agentworkflow/single_agent.ts
+++ b/examples/agentworkflow/single-agent.ts
@@ -2,17 +2,15 @@
  * This example shows how to use AgentWorkflow as a single agent with tools
  */
 import { OpenAI } from "@llamaindex/openai";
-import { AgentWorkflow, Settings } from "llamaindex";
+import { Settings, agent } from "llamaindex";
 import { getWeatherTool } from "../agent/utils/tools";
 
-const llm = new OpenAI({
+Settings.llm = new OpenAI({
   model: "gpt-4o",
 });
 
-Settings.llm = llm;
-
 async function singleWeatherAgent() {
-  const workflow = AgentWorkflow.fromTools({
+  const workflow = agent({
     tools: [getWeatherTool],
     verbose: false,
   });
diff --git a/examples/agentworkflow/with-anthropic.ts b/examples/agentworkflow/with-anthropic.ts
index b9ce96378..f9d47faea 100644
--- a/examples/agentworkflow/with-anthropic.ts
+++ b/examples/agentworkflow/with-anthropic.ts
@@ -1,10 +1,10 @@
 import fs from "fs";
 import {
+  agent,
   AgentToolCall,
   AgentToolCallResult,
-  AgentWorkflow,
-  FunctionAgent,
   FunctionTool,
+  multiAgent,
 } from "llamaindex";
 import { z } from "zod";
 
@@ -63,7 +63,7 @@ const saveFileTool = FunctionTool.from(
 );
 
 async function main() {
-  const reportAgent = new FunctionAgent({
+  const reportAgent = agent({
     name: "ReportAgent",
     description:
       "Responsible for creating concise reports about weather and inflation data",
@@ -72,7 +72,7 @@ async function main() {
     llm,
   });
 
-  const researchAgent = new FunctionAgent({
+  const researchAgent = agent({
     name: "ResearchAgent",
     description:
       "Responsible for gathering relevant information from the internet",
@@ -82,7 +82,7 @@ async function main() {
     llm,
   });
 
-  const workflow = new AgentWorkflow({
+  const workflow = multiAgent({
     agents: [researchAgent, reportAgent],
     rootAgent: researchAgent,
   });
diff --git a/packages/core/src/utils/index.ts b/packages/core/src/utils/index.ts
index 920a6787a..f21e34dd9 100644
--- a/packages/core/src/utils/index.ts
+++ b/packages/core/src/utils/index.ts
@@ -80,8 +80,9 @@ export {
   extractText,
   imageToDataUrl,
   messagesToHistory,
-  MockLLM,
   toToolDescriptions,
 } from "./llms";
 
+export { MockLLM } from "./mock";
+
 export { objectEntries } from "./object-entries";
diff --git a/packages/core/src/utils/llms.ts b/packages/core/src/utils/llms.ts
index c08933667..255b82b91 100644
--- a/packages/core/src/utils/llms.ts
+++ b/packages/core/src/utils/llms.ts
@@ -2,15 +2,6 @@ import { fs } from "@llamaindex/env";
 import { filetypemime } from "magic-bytes.js";
 import type {
   ChatMessage,
-  ChatResponse,
-  ChatResponseChunk,
-  CompletionResponse,
-  LLM,
-  LLMChatParamsNonStreaming,
-  LLMChatParamsStreaming,
-  LLMCompletionParamsNonStreaming,
-  LLMCompletionParamsStreaming,
-  LLMMetadata,
   MessageContent,
   MessageContentDetail,
   MessageContentTextDetail,
@@ -152,82 +143,3 @@ export async function imageToDataUrl(
   }
   return await blobToDataUrl(input);
 }
-
-export class MockLLM implements LLM {
-  metadata: LLMMetadata;
-  options: {
-    timeBetweenToken: number;
-    responseMessage: string;
-  };
-
-  constructor(options?: {
-    timeBetweenToken?: number;
-    responseMessage?: string;
-    metadata?: LLMMetadata;
-  }) {
-    this.options = {
-      timeBetweenToken: options?.timeBetweenToken ?? 20,
-      responseMessage: options?.responseMessage ?? "This is a mock response",
-    };
-    this.metadata = options?.metadata ?? {
-      model: "MockLLM",
-      temperature: 0.5,
-      topP: 0.5,
-      contextWindow: 1024,
-      tokenizer: undefined,
-    };
-  }
-
-  chat(
-    params: LLMChatParamsStreaming<object, object>,
-  ): Promise<AsyncIterable<ChatResponseChunk>>;
-  chat(
-    params: LLMChatParamsNonStreaming<object, object>,
-  ): Promise<ChatResponse<object>>;
-  async chat(
-    params:
-      | LLMChatParamsStreaming<object, object>
-      | LLMChatParamsNonStreaming<object, object>,
-  ): Promise<AsyncIterable<ChatResponseChunk> | ChatResponse<object>> {
-    const responseMessage = this.options.responseMessage;
-    const timeBetweenToken = this.options.timeBetweenToken;
-
-    if (params.stream) {
-      return (async function* () {
-        for (const char of responseMessage) {
-          yield { delta: char, raw: {} };
-          await new Promise((resolve) => setTimeout(resolve, timeBetweenToken));
-        }
-      })();
-    }
-
-    return {
-      message: { content: responseMessage, role: "assistant" },
-      raw: {},
-    };
-  }
-
-  async complete(
-    params: LLMCompletionParamsStreaming,
-  ): Promise<AsyncIterable<CompletionResponse>>;
-  async complete(
-    params: LLMCompletionParamsNonStreaming,
-  ): Promise<CompletionResponse>;
-  async complete(
-    params: LLMCompletionParamsStreaming | LLMCompletionParamsNonStreaming,
-  ): Promise<AsyncIterable<CompletionResponse> | CompletionResponse> {
-    const responseMessage = this.options.responseMessage;
-    const timeBetweenToken = this.options.timeBetweenToken;
-
-    if (params.stream) {
-      return (async function* () {
-        for (const char of responseMessage) {
-          yield { delta: char, text: char, raw: {} };
-          await new Promise((resolve) => setTimeout(resolve, timeBetweenToken));
-        }
-      })();
-    }
-
-    return { text: responseMessage, raw: {} };
-  }
-}
diff --git a/packages/core/src/utils/mock.ts b/packages/core/src/utils/mock.ts
new file mode 100644
index 000000000..2a29e775a
--- /dev/null
+++ b/packages/core/src/utils/mock.ts
@@ -0,0 +1,93 @@
+// TODO: move to a test package
+import { ToolCallLLM } from "../llms/base";
+import type {
+  ChatResponse,
+  ChatResponseChunk,
+  CompletionResponse,
+  LLMChatParamsNonStreaming,
+  LLMChatParamsStreaming,
+  LLMCompletionParamsNonStreaming,
+  LLMCompletionParamsStreaming,
+  LLMMetadata,
+} from "../llms/type";
+
+export class MockLLM extends ToolCallLLM {
+  metadata: LLMMetadata;
+  options: {
+    timeBetweenToken: number;
+    responseMessage: string;
+  };
+  supportToolCall: boolean = false;
+
+  constructor(options?: {
+    timeBetweenToken?: number;
+    responseMessage?: string;
+    metadata?: LLMMetadata;
+  }) {
+    super();
+    this.options = {
+      timeBetweenToken: options?.timeBetweenToken ?? 20,
+      responseMessage: options?.responseMessage ?? "This is a mock response",
+    };
+    this.metadata = options?.metadata ?? {
+      model: "MockLLM",
+      temperature: 0.5,
+      topP: 0.5,
+      contextWindow: 1024,
+      tokenizer: undefined,
+    };
+  }
+
+  chat(
+    params: LLMChatParamsStreaming<object, object>,
+  ): Promise<AsyncIterable<ChatResponseChunk>>;
+  chat(
+    params: LLMChatParamsNonStreaming<object, object>,
+  ): Promise<ChatResponse<object>>;
+  async chat(
+    params:
+      | LLMChatParamsStreaming<object, object>
+      | LLMChatParamsNonStreaming<object, object>,
+  ): Promise<AsyncIterable<ChatResponseChunk> | ChatResponse<object>> {
+    const responseMessage = this.options.responseMessage;
+    const timeBetweenToken = this.options.timeBetweenToken;
+
+    if (params.stream) {
+      return (async function* () {
+        for (const char of responseMessage) {
+          yield { delta: char, raw: {} };
+          await new Promise((resolve) => setTimeout(resolve, timeBetweenToken));
+        }
+      })();
+    }
+
+    return {
+      message: { content: responseMessage, role: "assistant" },
+      raw: {},
+    };
+  }
+
+  async complete(
+    params: LLMCompletionParamsStreaming,
+  ): Promise<AsyncIterable<CompletionResponse>>;
+  async complete(
+    params: LLMCompletionParamsNonStreaming,
+  ): Promise<CompletionResponse>;
+  async complete(
+    params: LLMCompletionParamsStreaming | LLMCompletionParamsNonStreaming,
+  ): Promise<AsyncIterable<CompletionResponse> | CompletionResponse> {
+    const responseMessage = this.options.responseMessage;
+    const timeBetweenToken = this.options.timeBetweenToken;
+
+    if (params.stream) {
+      return (async function* () {
+        for (const char of responseMessage) {
+          yield { delta: char, text: char, raw: {} };
+          await new Promise((resolve) => setTimeout(resolve, timeBetweenToken));
+        }
+      })();
+    }
+
+    return { text: responseMessage, raw: {} };
+  }
+}
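
A quick sketch of exercising the relocated mock in both modes (usage inferred from the class above; not part of the patch):

```ts
import { MockLLM } from "@llamaindex/core/utils";

const llm = new MockLLM({ responseMessage: "Hi!", timeBetweenToken: 0 });

// non-streaming: resolves to a single ChatResponse
const res = await llm.chat({
  messages: [{ role: "user", content: "hello" }],
});
console.log(res.message.content); // "Hi!"

// streaming: yields one character per chunk
const stream = await llm.chat({
  messages: [{ role: "user", content: "hello" }],
  stream: true,
});
for await (const chunk of stream) process.stdout.write(chunk.delta);
```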
diff --git a/packages/llamaindex/src/indices/BaseIndex.ts b/packages/llamaindex/src/indices/BaseIndex.ts
index 2d90a7fd8..7d5498093 100644
--- a/packages/llamaindex/src/indices/BaseIndex.ts
+++ b/packages/llamaindex/src/indices/BaseIndex.ts
@@ -2,15 +2,21 @@ import type {
   BaseChatEngine,
   ContextChatEngineOptions,
 } from "@llamaindex/core/chat-engine";
+import type { ToolMetadata } from "@llamaindex/core/llms";
 import type { BaseQueryEngine } from "@llamaindex/core/query-engine";
 import type { BaseSynthesizer } from "@llamaindex/core/response-synthesizers";
 import type { BaseRetriever } from "@llamaindex/core/retriever";
 import type { BaseNode, Document } from "@llamaindex/core/schema";
 import type { BaseDocumentStore } from "@llamaindex/core/storage/doc-store";
 import type { BaseIndexStore } from "@llamaindex/core/storage/index-store";
+import type { JSONSchemaType } from "ajv";
 import { runTransformations } from "../ingestion/IngestionPipeline.js";
 import { Settings } from "../Settings.js";
 import type { StorageContext } from "../storage/StorageContext.js";
+import {
+  type QueryEngineParam,
+  QueryEngineTool,
+} from "../tools/QueryEngineTool.js";
 
 export interface BaseIndexInit<T> {
   storageContext: StorageContext;
@@ -19,6 +25,24 @@ export interface BaseIndexInit<T> {
   indexStruct: T;
 }
 
+/**
+ * Common parameter type for queryTool and asQueryTool
+ */
+export type QueryToolParams = (
+  | {
+      // eslint-disable-next-line @typescript-eslint/no-explicit-any
+      options: any;
+      retriever?: never;
+    }
+  | {
+      options?: never;
+      retriever?: BaseRetriever;
+    }
+) & {
+  responseSynthesizer?: BaseSynthesizer;
+  metadata?: ToolMetadata<JSONSchemaType<QueryEngineParam>> | undefined;
+};
+
 /**
  * Indexes are the data structure that we store our nodes and embeddings in so
  * they can be retrieved for our queries.
@@ -61,6 +85,22 @@ export abstract class BaseIndex<T> {
     options?: Omit<ContextChatEngineOptions, "retriever">,
   ): BaseChatEngine;
 
+  /**
+   * Returns a query tool built on top of this index's query engine.
+   * Either options or a retriever can be passed, but not both.
+   * If options are provided, they are passed to asRetriever to create the retriever.
+   */
+  asQueryTool(params: QueryToolParams): QueryEngineTool {
+    if (params.options) {
+      params.retriever = this.asRetriever(params.options);
+    }
+
+    return new QueryEngineTool({
+      queryEngine: this.asQueryEngine(params),
+      metadata: params?.metadata,
+    });
+  }
+
   /**
    * Insert a document into the index.
    * @param document
@@ -76,4 +116,33 @@ export abstract class BaseIndex<T> {
     refDocId: string,
     deleteFromDocStore?: boolean,
   ): Promise<void>;
+
+  /**
+   * Alias for asRetriever
+   * @param options
+   */
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  retriever(options?: any): BaseRetriever {
+    return this.asRetriever(options);
+  }
+
+  /**
+   * Alias for asQueryEngine
+   * @param options you can supply your own custom Retriever and ResponseSynthesizer
+   */
+  queryEngine(options?: {
+    retriever?: BaseRetriever;
+    responseSynthesizer?: BaseSynthesizer;
+  }): BaseQueryEngine {
+    return this.asQueryEngine(options);
+  }
+
+  /**
+   * Alias for asQueryTool
+   * Either options or a retriever can be passed, but not both.
+   * If options are provided, they are passed to asRetriever to create the retriever.
+   */
+  queryTool(params: QueryToolParams): QueryEngineTool {
+    return this.asQueryTool(params);
+  }
 }
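
Taken together, the new surface looks like this (a sketch; `index` is any `BaseIndex` instance and the metadata values are illustrative):

```ts
// options and retriever are mutually exclusive by construction of QueryToolParams
const toolFromOptions = index.queryTool({
  options: { similarityTopK: 10 },
  metadata: {
    name: "docs_tool",
    description: "Answers questions about the indexed documents",
  },
});

const toolFromRetriever = index.queryTool({
  retriever: index.retriever(), // retriever() is the new alias for asRetriever()
});

// queryEngine() is likewise an alias for asQueryEngine()
const engine = index.queryEngine();
```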
diff --git a/packages/llamaindex/src/tools/QueryEngineTool.ts b/packages/llamaindex/src/tools/QueryEngineTool.ts
index 30bac7a92..0fc4c3588 100644
--- a/packages/llamaindex/src/tools/QueryEngineTool.ts
+++ b/packages/llamaindex/src/tools/QueryEngineTool.ts
@@ -19,7 +19,7 @@ const DEFAULT_PARAMETERS: JSONSchemaType<QueryEngineParam> = {
 
 export type QueryEngineToolParams = {
   queryEngine: BaseQueryEngine;
-  metadata: ToolMetadata<JSONSchemaType<QueryEngineParam>>;
+  metadata?: ToolMetadata<JSONSchemaType<QueryEngineParam>> | undefined;
 };
 
 export type QueryEngineParam = {
diff --git a/packages/workflow/package.json b/packages/workflow/package.json
index 0c7c32b91..7f272a908 100644
--- a/packages/workflow/package.json
+++ b/packages/workflow/package.json
@@ -79,13 +79,15 @@
   },
   "scripts": {
     "dev": "bunchee --watch",
-    "build": "bunchee"
+    "build": "bunchee",
+    "test": "vitest run"
   },
   "devDependencies": {
     "@llamaindex/env": "workspace:*",
     "@llamaindex/core": "workspace:*",
     "@types/node": "^22.9.0",
-    "bunchee": "6.4.0"
+    "bunchee": "6.4.0",
+    "vitest": "^2.1.5"
   },
   "peerDependencies": {
     "@llamaindex/env": "workspace:*",
diff --git a/packages/workflow/src/agent/agent-workflow.ts b/packages/workflow/src/agent/agent-workflow.ts
index 5471ae746..a1a4a579d 100644
--- a/packages/workflow/src/agent/agent-workflow.ts
+++ b/packages/workflow/src/agent/agent-workflow.ts
@@ -1,8 +1,4 @@
-import type {
-  BaseToolWithCall,
-  ChatMessage,
-  ToolCallLLM,
-} from "@llamaindex/core/llms";
+import type { ChatMessage } from "@llamaindex/core/llms";
 import { ChatMemoryBuffer } from "@llamaindex/core/memory";
 import { PromptTemplate } from "@llamaindex/core/prompts";
 import { FunctionTool } from "@llamaindex/core/tools";
@@ -19,9 +15,9 @@ import {
   AgentToolCall,
   AgentToolCallResult,
 } from "./events";
-import { FunctionAgent } from "./function-agent";
+import { FunctionAgent, type FunctionAgentParams } from "./function-agent";
 
-export const DEFAULT_HANDOFF_PROMPT = new PromptTemplate({
+const DEFAULT_HANDOFF_PROMPT = new PromptTemplate({
   template: `Useful for handing off to another agent.
 If you are currently not equipped to handle the user's request, or another agent is better suited to handle the request, please hand off to the appropriate agent.
 
@@ -30,7 +26,7 @@ Currently available agents:
 `,
 });
 
-export const DEFAULT_HANDOFF_OUTPUT_PROMPT = new PromptTemplate({
+const DEFAULT_HANDOFF_OUTPUT_PROMPT = new PromptTemplate({
   template: `Agent {to_agent} is now handling the request due to the following reason: {reason}.\nPlease continue with the current request.`,
 });
 
@@ -56,17 +52,30 @@ export class AgentStepEvent extends WorkflowEvent<{
   toolCalls: AgentToolCall[];
 }> {}
 
+export type SingleAgentParams = FunctionAgentParams & {
+  /**
+   * Whether to log verbose output
+   */
+  verbose?: boolean;
+  /**
+   * Timeout for the workflow in seconds
+   */
+  timeout?: number;
+};
+
 export type AgentWorkflowParams = {
   /**
    * List of agents to include in the workflow.
    * Need at least one agent.
+   * Can also be an array of AgentWorkflow objects, in which case the agents from each workflow will be extracted.
    */
-  agents: BaseWorkflowAgent[];
+  agents: BaseWorkflowAgent[] | AgentWorkflow[];
   /**
    * The agent to start the workflow with.
    * Must be an agent in the `agents` list.
+   * Can also be an AgentWorkflow object, in which case the workflow must have exactly one agent.
    */
-  rootAgent: BaseWorkflowAgent;
+  rootAgent: BaseWorkflowAgent | AgentWorkflow;
   verbose?: boolean;
   /**
    * Timeout for the workflow in seconds.
@@ -74,6 +83,24 @@ export type AgentWorkflowParams = {
   timeout?: number;
 };
 
+/**
+ * Create a multi-agent workflow
+ * @param params - Parameters for the AgentWorkflow
+ * @returns A new AgentWorkflow instance
+ */
+export const multiAgent = (params: AgentWorkflowParams): AgentWorkflow => {
+  return new AgentWorkflow(params);
+};
+
+/**
+ * Create a simple workflow with a single agent and specified tools
+ * @param params - Parameters for the single agent workflow
+ * @returns A new AgentWorkflow instance
+ */
+export const agent = (params: SingleAgentParams): AgentWorkflow => {
+  return AgentWorkflow.fromTools(params);
+};
+
 /**
  * AgentWorkflow - An event-driven workflow for executing agents with tools
  *
@@ -93,12 +120,47 @@ export class AgentWorkflow {
       timeout: timeout ?? 60,
     });
     this.verbose = verbose ?? false;
-    this.rootAgentName = rootAgent.name;
+
+    // Handle AgentWorkflow cases for agents
+    const processedAgents: BaseWorkflowAgent[] = [];
+    if (agents.length > 0) {
+      if (agents[0] instanceof AgentWorkflow) {
+        // If agents is AgentWorkflow[], extract the BaseWorkflowAgent from each workflow
+        const agentWorkflows = agents as AgentWorkflow[];
+        agentWorkflows.forEach((workflow) => {
+          const workflowAgents = workflow.getAgents();
+          processedAgents.push(...workflowAgents);
+        });
+      } else {
+        // Otherwise, agents is already BaseWorkflowAgent[]
+        processedAgents.push(...(agents as BaseWorkflowAgent[]));
+      }
+    }
+
+    // Handle AgentWorkflow case for rootAgent and set rootAgentName
+    if (rootAgent instanceof AgentWorkflow) {
+      // If rootAgent is an AgentWorkflow, check if it has exactly one agent
+      const rootAgents = rootAgent.getAgents();
+
+      if (rootAgents.length !== 1) {
+        throw new Error(
+          `Root agent must be a single agent, but it is a workflow with ${rootAgents.length} agents`,
+        );
+      }
+
+      // We know rootAgents[0] exists because we checked length === 1 above
+      this.rootAgentName = rootAgents[0]!.name;
+    } else {
+      // Otherwise, rootAgent is already a BaseWorkflowAgent
+      this.rootAgentName = rootAgent.name;
+    }
+
     // Validate root agent
-    if (!agents.some((a) => a.name === this.rootAgentName)) {
-      throw new Error(`Root agent ${rootAgent} not found in agents`);
+    if (!processedAgents.some((a) => a.name === this.rootAgentName)) {
+      throw new Error(`Root agent ${this.rootAgentName} not found in agents`);
     }
-    this.addAgents(agents ?? []);
+
+    this.addAgents(processedAgents);
   }
 
   private validateAgent(agent: BaseWorkflowAgent) {
@@ -141,6 +203,9 @@ export class AgentWorkflow {
     });
   }
 
+  /**
+   * Adds a new agent to the workflow
+   */
   addAgent(agent: BaseWorkflowAgent): this {
     this.agents.set(agent.name, agent);
     this.validateAgent(agent);
@@ -148,35 +213,34 @@ export class AgentWorkflow {
     return this;
   }
 
+  /**
+   * Gets all agents in this workflow
+   * @returns Array of agents in this workflow
+   */
+  getAgents(): BaseWorkflowAgent[] {
+    return Array.from(this.agents.values());
+  }
+
   /**
    * Create a simple workflow with a single agent and specified tools
+   * @param params - Parameters for the single agent workflow
+   * @returns A new AgentWorkflow instance
    */
-  static fromTools({
-    tools,
-    llm,
-    systemPrompt,
-    verbose,
-    timeout,
-  }: {
-    tools: BaseToolWithCall[];
-    llm?: ToolCallLLM;
-    systemPrompt?: string;
-    verbose?: boolean;
-    timeout?: number;
-  }): AgentWorkflow {
+
+  static fromTools(params: SingleAgentParams): AgentWorkflow {
     const agent = new FunctionAgent({
-      name: "Agent",
-      description: "A single agent that uses the provided tools or functions.",
-      tools,
-      llm,
-      systemPrompt,
+      name: params.name,
+      description: params.description,
+      tools: params.tools,
+      llm: params.llm,
+      systemPrompt: params.systemPrompt,
     });
 
     const workflow = new AgentWorkflow({
       agents: [agent],
       rootAgent: agent,
-      verbose: verbose ?? false,
-      timeout: timeout ?? 60,
+      verbose: params.verbose ?? false,
+      timeout: params.timeout ?? 60,
     });
 
     return workflow;
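
The net effect of the looser `AgentWorkflowParams` is that single-agent workflows compose directly into a multi-agent one. A sketch under assumed tools and model (none of these names come from the patch):

```ts
import { OpenAI } from "@llamaindex/openai";
import { agent, FunctionTool, multiAgent } from "llamaindex";
import { z } from "zod";

const llm = new OpenAI({ model: "gpt-4o-mini" });

// trivial placeholder tools, assumed for illustration
const searchTool = FunctionTool.from(
  ({ query }: { query: string }) => `results for ${query}`,
  {
    name: "search",
    description: "Searches the web",
    parameters: z.object({ query: z.string() }),
  },
);
const writeTool = FunctionTool.from(({ text }: { text: string }) => text, {
  name: "write",
  description: "Writes text",
  parameters: z.object({ text: z.string() }),
});

const research = agent({
  name: "ResearchAgent",
  description: "Gathers information",
  tools: [searchTool],
  canHandoffTo: ["ReportAgent"],
  llm,
});
const report = agent({
  name: "ReportAgent",
  description: "Writes reports",
  tools: [writeTool],
  llm,
});

// AgentWorkflow instances are accepted directly: their agents are extracted,
// and a single-agent workflow may serve as rootAgent (a multi-agent one throws)
const workflow = multiAgent({
  agents: [research, report],
  rootAgent: research,
});
```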
diff --git a/packages/workflow/src/agent/function-agent.ts b/packages/workflow/src/agent/function-agent.ts
index 29b6e9d85..8576fff40 100644
--- a/packages/workflow/src/agent/function-agent.ts
+++ b/packages/workflow/src/agent/function-agent.ts
@@ -1,13 +1,14 @@
 import type { JSONObject } from "@llamaindex/core/global";
 import { Settings } from "@llamaindex/core/global";
-import type {
-  BaseToolWithCall,
-  ChatMessage,
-  ChatResponseChunk,
+import {
   ToolCallLLM,
+  type BaseToolWithCall,
+  type ChatMessage,
+  type ChatResponseChunk,
 } from "@llamaindex/core/llms";
 import { BaseMemory } from "@llamaindex/core/memory";
 import type { HandlerContext } from "../workflow-context";
+import { AgentWorkflow } from "./agent-workflow";
 import { type AgentWorkflowContext, type BaseWorkflowAgent } from "./base";
 import {
   AgentOutput,
@@ -20,7 +21,10 @@ const DEFAULT_SYSTEM_PROMPT =
   "You are a helpful assistant. Use the provided tools to answer questions.";
 
 export type FunctionAgentParams = {
-  name: string;
+  /**
+   * Agent name
+   */
+  name?: string | undefined;
   /**
    * LLM to use for the agent, required.
    */
@@ -29,15 +33,16 @@ export type FunctionAgentParams = {
    * Description of the agent, useful for task assignment.
    * Should provide the capabilities or responsibilities of the agent.
    */
-  description: string;
+  description?: string | undefined;
   /**
    * List of tools that the agent can use, requires at least one tool.
    */
   tools: BaseToolWithCall[];
   /**
    * List of agents that this agent can delegate tasks to
+   * Can be a list of agent names as strings, BaseWorkflowAgent instances, or AgentWorkflow instances
    */
-  canHandoffTo?: string[] | BaseWorkflowAgent[] | undefined;
+  canHandoffTo?: string[] | BaseWorkflowAgent[] | AgentWorkflow[] | undefined;
   /**
    * Custom system prompt for the agent
    */
@@ -60,20 +65,43 @@ export class FunctionAgent implements BaseWorkflowAgent {
     canHandoffTo,
     systemPrompt,
   }: FunctionAgentParams) {
-    this.name = name;
+    this.name = name ?? "Agent";
     this.llm = llm ?? (Settings.llm as ToolCallLLM);
-    this.description = description;
+    if (!this.llm.supportToolCall) {
+      throw new Error("FunctionAgent requires an LLM that supports tool calls");
+    }
+    this.description =
+      description ??
+      "A single agent that uses the provided tools or functions.";
     this.tools = tools;
     if (tools.length === 0) {
       throw new Error("FunctionAgent must have at least one tool");
     }
-    this.canHandoffTo =
-      Array.isArray(canHandoffTo) &&
-      canHandoffTo.every((item) => typeof item === "string")
-        ? canHandoffTo
-        : (canHandoffTo?.map((agent) =>
-            typeof agent === "string" ? agent : agent.name,
-          ) ?? []);
+    // Process canHandoffTo to extract agent names
+    this.canHandoffTo = [];
+    if (canHandoffTo) {
+      if (Array.isArray(canHandoffTo)) {
+        if (canHandoffTo.length > 0) {
+          if (typeof canHandoffTo[0] === "string") {
+            // string[] case
+            this.canHandoffTo = canHandoffTo as string[];
+          } else if (canHandoffTo[0] instanceof AgentWorkflow) {
+            // AgentWorkflow[] case
+            const workflows = canHandoffTo as AgentWorkflow[];
+            workflows.forEach((workflow) => {
+              const agentNames = workflow
+                .getAgents()
+                .map((agent) => agent.name);
+              this.canHandoffTo.push(...agentNames);
+            });
+          } else {
+            // BaseWorkflowAgent[] case
+            const agents = canHandoffTo as BaseWorkflowAgent[];
+            this.canHandoffTo = agents.map((agent) => agent.name);
+          }
+        }
+      }
+    }
     const uniqueHandoffAgents = new Set(this.canHandoffTo);
     if (uniqueHandoffAgents.size !== this.canHandoffTo.length) {
       throw new Error("Duplicate handoff agents");
diff --git a/packages/workflow/src/agent/index.ts b/packages/workflow/src/agent/index.ts
index a5e4338be..0d40a6e84 100644
--- a/packages/workflow/src/agent/index.ts
+++ b/packages/workflow/src/agent/index.ts
@@ -1,19 +1,3 @@
-export { AgentWorkflow } from "./agent-workflow";
-export type {
-  AgentInputData,
-  AgentStepEvent,
-  AgentWorkflowParams,
-  ToolCallsEvent,
-  ToolResultsEvent,
-} from "./agent-workflow";
-
-export {
-  AgentInput,
-  AgentOutput,
-  AgentSetup,
-  AgentStream,
-  AgentToolCall,
-  AgentToolCallResult,
-} from "./events";
-
-export { FunctionAgent, type FunctionAgentParams } from "./function-agent";
+export * from "./agent-workflow";
+export * from "./events";
+export * from "./function-agent";
diff --git a/unit/workflow/agent-workflow.test.ts b/packages/workflow/test/agent-workflow.test.ts
similarity index 53%
rename from unit/workflow/agent-workflow.test.ts
rename to packages/workflow/test/agent-workflow.test.ts
index 09ede8da7..e467d691c 100644
--- a/unit/workflow/agent-workflow.test.ts
+++ b/packages/workflow/test/agent-workflow.test.ts
@@ -1,20 +1,16 @@
+import { Settings } from "@llamaindex/core/global";
 import { FunctionTool } from "@llamaindex/core/tools";
-import { AgentWorkflow, FunctionAgent } from "@llamaindex/workflow/agent";
-import {
-  MockLLM,
-  Settings,
-  ToolCallLLM,
-  type JSONObject,
-  type LLMChatParamsNonStreaming,
-  type LLMChatParamsStreaming,
-} from "llamaindex";
+import { MockLLM } from "@llamaindex/core/utils";
 import { describe, expect, test, vi } from "vitest";
 import { z } from "zod";
+import { AgentWorkflow, FunctionAgent } from "../src/agent";
+import { setupToolCallingMockLLM } from "./mock";
 
 describe("AgentWorkflow", () => {
   test("agent workflow and function agent creation correctly", () => {
-    // Create a minimal mock for ToolCallLLM
-    const mockLLM = MockLLM as unknown as ToolCallLLM;
+    // Create a proper instance of MockLLM
+    const mockLLM = new MockLLM();
+    mockLLM.supportToolCall = true;
 
     // Create minimal tools
     const addTool = FunctionTool.from(
@@ -98,7 +94,6 @@ describe("AgentWorkflow", () => {
   test("single agent workflow runs correctly", async () => {
     // This test don't need to handoff
     const mockLLM = setupToolCallingMockLLM("add", { x: 1, y: 2 });
-
     Settings.llm = mockLLM;
 
     const addTool = FunctionTool.from(
@@ -117,7 +112,7 @@ describe("AgentWorkflow", () => {
 
     const workflow = AgentWorkflow.fromTools({
       tools: [addTool],
-      llm: mockLLM as unknown as ToolCallLLM,
+      llm: mockLLM,
       verbose: false,
     });
 
@@ -162,107 +157,3 @@ describe("AgentWorkflow", () => {
     //
   });
 });
-
-/**
- * Configure a mock LLM to respond with tool calling responses.
- * This version will first return a tool call, then return a response without tool calls by checking the message history.
- */
-function setupToolCallingMockLLM(toolName: string, toolKwargs: unknown) {
-  // Reset mocks
-  vi.resetAllMocks();
-
-  const mockLLM = new MockLLM({
-    responseMessage: "I'll help with that.",
-    metadata: {
-      model: "mock-model",
-      temperature: 0,
-      topP: 1,
-      contextWindow: 4096,
-      tokenizer: undefined,
-    },
-  });
-
-  let hasCalledTool = false;
-
-  // Override the chat method with our custom implementation
-  mockLLM.chat = vi
-    .fn()
-    .mockImplementation(
-      (params: LLMChatParamsStreaming | LLMChatParamsNonStreaming) => {
-        // Track if we've already returned a tool call response
-        // If we have tool results in the message history, we're in the second round
-        const hasToolResult = params.messages.some(
-          (msg) =>
-            msg.options &&
-            typeof msg.options === "object" &&
-            "toolResult" in msg.options,
-        );
-
-        if (hasToolResult || hasCalledTool) {
-          // Second response - just return a normal message without tool calls
-          hasCalledTool = false; // Reset for next run
-
-          if (params.stream === true) {
-            return Promise.resolve({
-              async *[Symbol.asyncIterator]() {
-                yield {
-                  delta: "Final response",
-                  raw: {},
-                };
-              },
-            });
-          } else {
-            return Promise.resolve({
-              message: {
-                role: "assistant",
-                content: "Final response",
-              },
-              raw: {},
-            });
-          }
-        } else {
-          // First response - return a tool call
-          hasCalledTool = true;
-
-          if (params.stream === true) {
-            return Promise.resolve({
-              async *[Symbol.asyncIterator]() {
-                yield {
-                  delta: "I'll help with that.",
-                  raw: {},
-                  options: {
-                    toolCall: [
-                      {
-                        id: "call_123",
-                        name: toolName,
-                        input: toolKwargs as JSONObject,
-                      },
-                    ],
-                  },
-                };
-              },
-            });
-          } else {
-            return Promise.resolve({
-              message: {
-                role: "assistant",
-                content: "I'll help with that.",
-                options: {
-                  toolCall: [
-                    {
-                      id: "call_123",
-                      name: toolName,
-                      input: toolKwargs as JSONObject,
-                    },
-                  ],
-                },
-              },
-              raw: {},
-            });
-          }
-        }
-      },
-    );
-
-  return mockLLM;
-}
diff --git a/unit/workflow/function-agent.test.ts b/packages/workflow/test/function-agent.test.ts
similarity index 85%
rename from unit/workflow/function-agent.test.ts
rename to packages/workflow/test/function-agent.test.ts
index 1dc658b43..ceb2b871c 100644
--- a/unit/workflow/function-agent.test.ts
+++ b/packages/workflow/test/function-agent.test.ts
@@ -1,15 +1,12 @@
-import {
-  AgentToolCallResult,
-  FunctionAgent,
-  FunctionTool,
-  MockLLM,
-  ToolCallLLM,
-  type ChatMessage,
-} from "llamaindex";
+import { ChatMessage } from "@llamaindex/core/llms";
+import { FunctionTool } from "@llamaindex/core/tools";
+import { MockLLM } from "@llamaindex/core/utils";
 import { describe, expect, test } from "vitest";
 import { z } from "zod";
+import { AgentToolCallResult, FunctionAgent } from "../src/agent";
 
-const mockLLM = new MockLLM() as unknown as ToolCallLLM;
+const mockLLM = new MockLLM();
+mockLLM.supportToolCall = true;
 
 describe("FunctionAgent", () => {
   test("function agent can parse tool call results", async () => {
diff --git a/packages/workflow/test/mock.ts b/packages/workflow/test/mock.ts
new file mode 100644
index 000000000..056a4d013
--- /dev/null
+++ b/packages/workflow/test/mock.ts
@@ -0,0 +1,116 @@
+import { JSONObject } from "@llamaindex/core/global";
+import {
+  LLMChatParamsNonStreaming,
+  LLMChatParamsStreaming,
+  ToolCallLLM,
+} from "@llamaindex/core/llms";
+import { MockLLM } from "@llamaindex/core/utils";
+import { vi } from "vitest";
+
+/**
+ * Configure a mock LLM to respond with tool calling responses.
+ * This version will first return a tool call, then return a response without tool calls by checking the message history.
+ */
+export function setupToolCallingMockLLM(
+  toolName: string,
+  toolKwargs: unknown,
+): ToolCallLLM {
+  // Reset mocks
+  vi.resetAllMocks();
+
+  const mockLLM = new MockLLM({
+    responseMessage: "I'll help with that.",
+    metadata: {
+      model: "mock-model",
+      temperature: 0,
+      topP: 1,
+      contextWindow: 4096,
+      tokenizer: undefined,
+    },
+  });
+  mockLLM.supportToolCall = true;
+
+  let hasCalledTool = false;
+
+  // Override the chat method with our custom implementation
+  mockLLM.chat = vi
+    .fn()
+    .mockImplementation(
+      (params: LLMChatParamsStreaming | LLMChatParamsNonStreaming) => {
+        // Track if we've already returned a tool call response
+        // If we have tool results in the message history, we're in the second round
+        const hasToolResult = params.messages.some(
+          (msg) =>
+            msg.options &&
+            typeof msg.options === "object" &&
+            "toolResult" in msg.options,
+        );
+
+        if (hasToolResult || hasCalledTool) {
+          // Second response - just return a normal message without tool calls
+          hasCalledTool = false; // Reset for next run
+
+          if (params.stream === true) {
+            return Promise.resolve({
+              async *[Symbol.asyncIterator]() {
+                yield {
+                  delta: "Final response",
+                  raw: {},
+                };
+              },
+            });
+          } else {
+            return Promise.resolve({
+              message: {
+                role: "assistant",
+                content: "Final response",
+              },
+              raw: {},
+            });
+          }
+        } else {
+          // First response - return a tool call
+          hasCalledTool = true;
+
+          if (params.stream === true) {
+            return Promise.resolve({
+              async *[Symbol.asyncIterator]() {
+                yield {
+                  delta: "I'll help with that.",
+                  raw: {},
+                  options: {
+                    toolCall: [
+                      {
+                        id: "call_123",
+                        name: toolName,
+                        input: toolKwargs as JSONObject,
+                      },
+                    ],
+                  },
+                };
+              },
+            });
+          } else {
+            return Promise.resolve({
+              message: {
+                role: "assistant",
+                content: "I'll help with that.",
+                options: {
+                  toolCall: [
+                    {
+                      id: "call_123",
+                      name: toolName,
+                      input: toolKwargs as JSONObject,
+                    },
+                  ],
+                },
+              },
+              raw: {},
+            });
+          }
+        }
+      },
+    );
+
+  return mockLLM;
+}
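
A sketch of how a test might drive this helper end to end (the final assertion is deliberately loose; the exact result shape depends on the workflow's final `AgentOutput`):

```ts
import { FunctionTool } from "@llamaindex/core/tools";
import { expect, test } from "vitest";
import { z } from "zod";
import { agent } from "../src/agent";
import { setupToolCallingMockLLM } from "./mock";

test("tool call round-trip against the mock", async () => {
  // First chat() call returns a tool call for "add", second returns a plain message
  const llm = setupToolCallingMockLLM("add", { x: 1, y: 2 });

  const addTool = FunctionTool.from(
    ({ x, y }: { x: number; y: number }) => x + y,
    {
      name: "add",
      description: "Adds two numbers",
      parameters: z.object({ x: z.number(), y: z.number() }),
    },
  );

  const workflow = agent({ tools: [addTool], llm });
  const result = await workflow.run("What is 1 + 2?");
  expect(result).toBeDefined();
});
```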
diff --git a/unit/workflow/workflow.test.ts b/packages/workflow/test/workflow.test.ts
similarity index 99%
rename from unit/workflow/workflow.test.ts
rename to packages/workflow/test/workflow.test.ts
index fd7ed5b73..6d83141ca 100644
--- a/unit/workflow/workflow.test.ts
+++ b/packages/workflow/test/workflow.test.ts
@@ -1,5 +1,3 @@
-import type { HandlerContext, StepHandler, StepParameters } from "llamaindex";
-import { StartEvent, StopEvent, Workflow, WorkflowEvent } from "llamaindex";
 import {
   beforeEach,
   describe,
@@ -9,6 +7,8 @@ import {
   vi,
   type Mocked,
 } from "vitest";
+import type { HandlerContext, StepHandler, StepParameters } from "../src";
+import { StartEvent, StopEvent, Workflow, WorkflowEvent } from "../src";
 
 class JokeEvent extends WorkflowEvent<{ joke: string }> {}
 
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 557083c43..302574e9f 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -115,7 +115,7 @@ importers:
         version: 1.6.0(@aws-sdk/credential-provider-web-identity@3.744.0)
       ai:
         specifier: ^3.4.33
-        version: 3.4.33(openai@4.86.0(ws@8.18.0(bufferutil@4.0.9))(zod@3.24.2))(react@19.0.0)(sswr@2.1.0(svelte@5.19.10))(svelte@5.19.10)(vue@3.5.13(typescript@5.7.3))(zod@3.24.2)
+        version: 3.4.33(openai@4.86.0(zod@3.24.2))(react@19.0.0)(sswr@2.1.0(svelte@5.19.10))(svelte@5.19.10)(vue@3.5.13(typescript@5.7.3))(zod@3.24.2)
       class-variance-authority:
         specifier: ^0.7.0
         version: 0.7.1
@@ -781,7 +781,7 @@ importers:
         version: 2.10.2(@types/react@19.0.10)(react@19.0.0)
       openai:
         specifier: ^4
-        version: 4.83.0(ws@8.18.0(bufferutil@4.0.9))(zod@3.24.2)
+        version: 4.83.0(ws@8.18.0)(zod@3.24.2)
       typedoc:
         specifier: ^0.26.11
         version: 0.26.11(typescript@5.7.3)
@@ -833,7 +833,7 @@ importers:
         version: link:../../../llamaindex
       openai:
         specifier: ^4.73.1
-        version: 4.83.0(ws@8.18.0(bufferutil@4.0.9))(zod@3.24.2)
+        version: 4.83.0(ws@8.18.0)(zod@3.24.2)
     devDependencies:
       tsx:
         specifier: ^4.19.3
@@ -1309,7 +1309,7 @@ importers:
         version: link:../../env
       openai:
         specifier: ^4.86.0
-        version: 4.86.0(ws@8.18.0(bufferutil@4.0.9))(zod@3.24.2)
+        version: 4.86.0(ws@8.18.0)(zod@3.24.2)
     devDependencies:
       bunchee:
         specifier: 6.4.0
@@ -1413,7 +1413,7 @@ importers:
         version: link:../../../env
       chromadb:
         specifier: 1.10.3
-        version: 1.10.3(cohere-ai@7.14.0)(openai@4.86.0(ws@8.18.0(bufferutil@4.0.9))(zod@3.24.2))(voyageai@0.0.3-1)
+        version: 1.10.3(cohere-ai@7.14.0)(openai@4.86.0)(voyageai@0.0.3-1)
       chromadb-default-embed:
         specifier: ^2.13.2
         version: 2.13.2
@@ -1730,6 +1730,9 @@ importers:
       bunchee:
         specifier: 6.4.0
         version: 6.4.0(typescript@5.7.3)
+      vitest:
+        specifier: ^2.1.5
+        version: 2.1.5(@edge-runtime/vm@4.0.4)(@types/node@22.9.0)(happy-dom@15.11.7)(lightningcss@1.29.1)(msw@2.7.0(@types/node@22.9.0)(typescript@5.7.3))(terser@5.38.2)
 
   unit:
     dependencies:
@@ -16957,7 +16960,7 @@ snapshots:
     dependencies:
       humanize-ms: 1.2.1
 
-  ai@3.4.33(openai@4.86.0(ws@8.18.0(bufferutil@4.0.9))(zod@3.24.2))(react@19.0.0)(sswr@2.1.0(svelte@5.19.10))(svelte@5.19.10)(vue@3.5.13(typescript@5.7.3))(zod@3.24.2):
+  ai@3.4.33(openai@4.86.0(zod@3.24.2))(react@19.0.0)(sswr@2.1.0(svelte@5.19.10))(svelte@5.19.10)(vue@3.5.13(typescript@5.7.3))(zod@3.24.2):
     dependencies:
       '@ai-sdk/provider': 0.0.26
       '@ai-sdk/provider-utils': 1.0.22(zod@3.24.2)
@@ -16973,7 +16976,7 @@ snapshots:
       secure-json-parse: 2.7.0
       zod-to-json-schema: 3.24.1(zod@3.24.2)
     optionalDependencies:
-      openai: 4.86.0(ws@8.18.0(bufferutil@4.0.9))(zod@3.24.2)
+      openai: 4.86.0(ws@8.18.0)(zod@3.24.2)
       react: 19.0.0
       sswr: 2.1.0(svelte@5.19.10)
       svelte: 5.19.10
@@ -17555,13 +17558,13 @@ snapshots:
     transitivePeerDependencies:
       - bare-buffer
 
-  chromadb@1.10.3(cohere-ai@7.14.0)(openai@4.86.0(ws@8.18.0(bufferutil@4.0.9))(zod@3.24.2))(voyageai@0.0.3-1):
+  chromadb@1.10.3(cohere-ai@7.14.0)(openai@4.86.0)(voyageai@0.0.3-1):
     dependencies:
       cliui: 8.0.1
       isomorphic-fetch: 3.0.0
     optionalDependencies:
       cohere-ai: 7.14.0
-      openai: 4.86.0(ws@8.18.0(bufferutil@4.0.9))(zod@3.24.2)
+      openai: 4.86.0(ws@8.18.0)(zod@3.24.2)
       voyageai: 0.0.3-1
     transitivePeerDependencies:
       - encoding
@@ -18313,7 +18316,7 @@ snapshots:
       eslint: 9.16.0(jiti@2.4.2)
       eslint-import-resolver-node: 0.3.9
       eslint-import-resolver-typescript: 3.7.0(eslint-plugin-import@2.31.0)(eslint@9.16.0(jiti@2.4.2))
-      eslint-plugin-import: 2.31.0(@typescript-eslint/parser@8.24.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.7.3))(eslint@9.16.0(jiti@2.4.2))
+      eslint-plugin-import: 2.31.0(@typescript-eslint/parser@8.24.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.7.3))(eslint-import-resolver-typescript@3.7.0)(eslint@9.16.0(jiti@2.4.2))
       eslint-plugin-jsx-a11y: 6.10.2(eslint@9.16.0(jiti@2.4.2))
       eslint-plugin-react: 7.37.2(eslint@9.16.0(jiti@2.4.2))
       eslint-plugin-react-hooks: 5.1.0(eslint@9.16.0(jiti@2.4.2))
@@ -18374,7 +18377,7 @@ snapshots:
       is-glob: 4.0.3
       stable-hash: 0.0.4
     optionalDependencies:
-      eslint-plugin-import: 2.31.0(@typescript-eslint/parser@8.24.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.7.3))(eslint@9.16.0(jiti@2.4.2))
+      eslint-plugin-import: 2.31.0(@typescript-eslint/parser@8.24.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.7.3))(eslint-import-resolver-typescript@3.7.0)(eslint@9.16.0(jiti@2.4.2))
     transitivePeerDependencies:
       - supports-color
 
@@ -18394,7 +18397,7 @@ snapshots:
     transitivePeerDependencies:
       - supports-color
 
-  eslint-module-utils@2.12.0(@typescript-eslint/parser@8.24.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.7.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.7.0)(eslint@9.16.0(jiti@2.4.2)):
+  eslint-module-utils@2.12.0(@typescript-eslint/parser@8.24.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.7.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.7.0(eslint-plugin-import@2.31.0)(eslint@9.16.0(jiti@2.4.2)))(eslint@9.16.0(jiti@2.4.2)):
     dependencies:
       debug: 3.2.7
     optionalDependencies:
@@ -18405,7 +18408,7 @@ snapshots:
     transitivePeerDependencies:
       - supports-color
 
-  eslint-module-utils@2.12.0(@typescript-eslint/parser@8.24.0(eslint@9.22.0(jiti@2.4.2))(typescript@5.7.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.7.0)(eslint@9.22.0(jiti@2.4.2)):
+  eslint-module-utils@2.12.0(@typescript-eslint/parser@8.24.0(eslint@9.22.0(jiti@2.4.2))(typescript@5.7.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.7.0(eslint-plugin-import@2.31.0)(eslint@9.22.0(jiti@2.4.2)))(eslint@9.22.0(jiti@2.4.2)):
     dependencies:
       debug: 3.2.7
     optionalDependencies:
@@ -18416,7 +18419,7 @@ snapshots:
     transitivePeerDependencies:
       - supports-color
 
-  eslint-plugin-import@2.31.0(@typescript-eslint/parser@8.24.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.7.3))(eslint@9.16.0(jiti@2.4.2)):
+  eslint-plugin-import@2.31.0(@typescript-eslint/parser@8.24.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.7.3))(eslint-import-resolver-typescript@3.7.0)(eslint@9.16.0(jiti@2.4.2)):
     dependencies:
       '@rtsao/scc': 1.1.0
       array-includes: 3.1.8
@@ -18427,7 +18430,7 @@ snapshots:
       doctrine: 2.1.0
       eslint: 9.16.0(jiti@2.4.2)
       eslint-import-resolver-node: 0.3.9
-      eslint-module-utils: 2.12.0(@typescript-eslint/parser@8.24.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.7.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.7.0)(eslint@9.16.0(jiti@2.4.2))
+      eslint-module-utils: 2.12.0(@typescript-eslint/parser@8.24.0(eslint@9.16.0(jiti@2.4.2))(typescript@5.7.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.7.0(eslint-plugin-import@2.31.0)(eslint@9.16.0(jiti@2.4.2)))(eslint@9.16.0(jiti@2.4.2))
       hasown: 2.0.2
       is-core-module: 2.16.1
       is-glob: 4.0.3
@@ -18456,7 +18459,7 @@ snapshots:
       doctrine: 2.1.0
       eslint: 9.22.0(jiti@2.4.2)
       eslint-import-resolver-node: 0.3.9
-      eslint-module-utils: 2.12.0(@typescript-eslint/parser@8.24.0(eslint@9.22.0(jiti@2.4.2))(typescript@5.7.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.7.0)(eslint@9.22.0(jiti@2.4.2))
+      eslint-module-utils: 2.12.0(@typescript-eslint/parser@8.24.0(eslint@9.22.0(jiti@2.4.2))(typescript@5.7.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.7.0(eslint-plugin-import@2.31.0)(eslint@9.22.0(jiti@2.4.2)))(eslint@9.22.0(jiti@2.4.2))
       hasown: 2.0.2
       is-core-module: 2.16.1
       is-glob: 4.0.3
@@ -21989,7 +21992,7 @@ snapshots:
       is-docker: 2.2.1
       is-wsl: 2.2.0
 
-  openai@4.83.0(ws@8.18.0(bufferutil@4.0.9))(zod@3.24.2):
+  openai@4.83.0(ws@8.18.0)(zod@3.24.2):
     dependencies:
       '@types/node': 18.19.75
       '@types/node-fetch': 2.6.12
@@ -22004,7 +22007,7 @@ snapshots:
     transitivePeerDependencies:
       - encoding
 
-  openai@4.86.0(ws@8.18.0(bufferutil@4.0.9))(zod@3.24.2):
+  openai@4.86.0(ws@8.18.0)(zod@3.24.2):
     dependencies:
       '@types/node': 18.19.76
       '@types/node-fetch': 2.6.12
-- 
GitLab