From 8b7fdba544086bfc9839267636997c66ce2c8ba4 Mon Sep 17 00:00:00 2001
From: Alex Yang <himself65@outlook.com>
Date: Mon, 23 Sep 2024 13:26:26 -0700
Subject: [PATCH] refactor: move chat engine & retriever into core (#1242)

---
 .changeset/four-beers-kick.md                 |  12 ++
 README.md                                     |  12 +-
 examples/agent/azure_dynamic_session.ts       |   1 -
 examples/agent/stream_openai_agent.ts         |  10 +-
 examples/agent/wiki.ts                        |  10 +-
 examples/anthropic/chat_interactive.ts        |   4 +-
 examples/chatEngine.ts                        |   2 +-
 examples/chatHistory.ts                       |  12 +-
 examples/cloud/chat.ts                        |   2 +-
 examples/multimodal/context.ts                |   4 +-
 examples/multimodal/rag.ts                    |   4 +-
 .../autotool/examples/02_nextjs/actions.ts    |  10 +-
 packages/core/package.json                    |  28 +++++
 packages/core/src/chat-engine/index.ts        |  28 +++++
 .../src/global/settings/callback-manager.ts   |   3 +
 packages/core/src/query-engine/base.ts        |   8 +-
 packages/core/src/retriever/index.ts          | 112 ++++++++++++++++++
 .../cloudflare-worker-agent/src/index.ts      |  10 +-
 .../nextjs-agent/src/actions/index.tsx        |  12 +-
 packages/llamaindex/e2e/node/openai.e2e.ts    |  10 +-
 packages/llamaindex/e2e/node/react.e2e.ts     |  11 +-
 packages/llamaindex/src/Retriever.ts          |  20 ----
 packages/llamaindex/src/agent/anthropic.ts    |  17 +--
 packages/llamaindex/src/agent/base.ts         |  28 ++---
 .../llamaindex/src/cloud/LlamaCloudIndex.ts   |   2 +-
 .../src/cloud/LlamaCloudRetriever.ts          |  28 ++---
 .../chat/CondenseQuestionChatEngine.ts        |  38 +++---
 .../src/engines/chat/ContextChatEngine.ts     |  37 +++---
 .../engines/chat/DefaultContextGenerator.ts   |   2 +-
 .../src/engines/chat/SimpleChatEngine.ts      |  33 +++---
 packages/llamaindex/src/engines/chat/types.ts |  52 +-------
 .../src/engines/query/RetrieverQueryEngine.ts |  20 ++--
 packages/llamaindex/src/index.edge.ts         |   6 +-
 packages/llamaindex/src/indices/BaseIndex.ts  |   2 +-
 .../llamaindex/src/indices/keyword/index.ts   |  12 +-
 .../llamaindex/src/indices/summary/index.ts   |  16 +--
 .../src/indices/vectorStore/index.ts          |  29 ++---
 packages/llamaindex/src/llm/types.ts          |  10 --
 packages/llamaindex/src/objects/base.ts       |   7 +-
 39 files changed, 387 insertions(+), 277 deletions(-)
 create mode 100644 .changeset/four-beers-kick.md
 create mode 100644 packages/core/src/chat-engine/index.ts
 create mode 100644 packages/core/src/retriever/index.ts
 delete mode 100644 packages/llamaindex/src/Retriever.ts

diff --git a/.changeset/four-beers-kick.md b/.changeset/four-beers-kick.md
new file mode 100644
index 000000000..4e591e3c3
--- /dev/null
+++ b/.changeset/four-beers-kick.md
@@ -0,0 +1,12 @@
+---
+"@llamaindex/core": minor
+"llamaindex": minor
+---
+
+refactor: move chat engine & retriever into core.
+
+This is a breaking change: the `stream` option has moved to the second parameter of `chat`.
+
+- The `chat` API in `BaseChatEngine` has changed: the `stream` option is now a separate second parameter instead of a field on the params object.
+- `chatHistory` in `BaseChatEngine` now returns `ChatMessage[] | Promise<ChatMessage[]>` instead of `BaseMemory`.
+- Updated the `retrieve-end` event type.
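
For reference, a minimal migration sketch for callers (assuming an `OpenAIAgent` as in the README example; the old call shape is kept in a comment):

```ts
import { OpenAIAgent } from "llamaindex";

const agent = new OpenAIAgent({ tools: [] });

// Before this patch, streaming was requested via a field on the params object:
// const stream = await agent.chat({ message: "Hello", stream: true });

// After this patch, the stream flag is the second argument; omit it (or pass false)
// to get a single EngineResponse instead of an async iterable.
const stream = await agent.chat({ message: "Hello" }, true);
for await (const { delta } of stream) {
  process.stdout.write(delta);
}
```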
diff --git a/README.md b/README.md
index 227d85912..48ba729b1 100644
--- a/README.md
+++ b/README.md
@@ -167,11 +167,13 @@ export async function chatWithAgent(
       // ... adding your tools here
     ],
   });
-  const responseStream = await agent.chat({
-    stream: true,
-    message: question,
-    chatHistory: prevMessages,
-  });
+  const responseStream = await agent.chat(
+    {
+      message: question,
+      chatHistory: prevMessages,
+    },
+    true,
+  );
   const uiStream = createStreamableUI(<div>loading...</div>);
   responseStream
     .pipeTo(
diff --git a/examples/agent/azure_dynamic_session.ts b/examples/agent/azure_dynamic_session.ts
index 31d5375c7..72dbb77e6 100644
--- a/examples/agent/azure_dynamic_session.ts
+++ b/examples/agent/azure_dynamic_session.ts
@@ -42,7 +42,6 @@ async function main() {
   const response = await agent.chat({
     message:
       "plot a chart of 5 random numbers and save it to /mnt/data/chart.png",
-    stream: false,
   });
 
   // Print the response
diff --git a/examples/agent/stream_openai_agent.ts b/examples/agent/stream_openai_agent.ts
index 4d8d6e8fc..19dfcca17 100644
--- a/examples/agent/stream_openai_agent.ts
+++ b/examples/agent/stream_openai_agent.ts
@@ -61,10 +61,12 @@ async function main() {
     tools: [functionTool, functionTool2],
   });
 
-  const stream = await agent.chat({
-    message: "Divide 16 by 2 then add 20",
-    stream: true,
-  });
+  const stream = await agent.chat(
+    {
+      message: "Divide 16 by 2 then add 20",
+    },
+    true,
+  );
 
   console.log("Response:");
 
diff --git a/examples/agent/wiki.ts b/examples/agent/wiki.ts
index e4100e990..1ec98652b 100644
--- a/examples/agent/wiki.ts
+++ b/examples/agent/wiki.ts
@@ -11,10 +11,12 @@ async function main() {
   });
 
   // Chat with the agent
-  const response = await agent.chat({
-    message: "Who was Goethe?",
-    stream: true,
-  });
+  const response = await agent.chat(
+    {
+      message: "Who was Goethe?",
+    },
+    true,
+  );
 
   for await (const { delta } of response) {
     process.stdout.write(delta);
diff --git a/examples/anthropic/chat_interactive.ts b/examples/anthropic/chat_interactive.ts
index 4565c70e4..9579cd7a4 100644
--- a/examples/anthropic/chat_interactive.ts
+++ b/examples/anthropic/chat_interactive.ts
@@ -18,14 +18,14 @@ import readline from "node:readline/promises";
   });
   const chatEngine = new SimpleChatEngine({
     llm,
-    chatHistory,
+    memory: chatHistory,
   });
   const rl = readline.createInterface({ input, output });
 
   while (true) {
     const query = await rl.question("User: ");
     process.stdout.write("Assistant: ");
-    const stream = await chatEngine.chat({ message: query, stream: true });
+    const stream = await chatEngine.chat({ message: query }, true);
     for await (const chunk of stream) {
       process.stdout.write(chunk.response);
     }
diff --git a/examples/chatEngine.ts b/examples/chatEngine.ts
index addd025bb..da24e2251 100644
--- a/examples/chatEngine.ts
+++ b/examples/chatEngine.ts
@@ -24,7 +24,7 @@ async function main() {
 
   while (true) {
     const query = await rl.question("Query: ");
-    const stream = await chatEngine.chat({ message: query, stream: true });
+    const stream = await chatEngine.chat({ message: query }, true);
     console.log();
     for await (const chunk of stream) {
       process.stdout.write(chunk.response);
diff --git a/examples/chatHistory.ts b/examples/chatHistory.ts
index c55c618d6..14e12a31b 100644
--- a/examples/chatHistory.ts
+++ b/examples/chatHistory.ts
@@ -24,11 +24,13 @@ async function main() {
 
   while (true) {
     const query = await rl.question("Query: ");
-    const stream = await chatEngine.chat({
-      message: query,
-      chatHistory,
-      stream: true,
-    });
+    const stream = await chatEngine.chat(
+      {
+        message: query,
+        chatHistory,
+      },
+      true,
+    );
     if (chatHistory.getLastSummary()) {
       // Print the summary of the conversation so far that is produced by the SummaryChatHistory
       console.log(`Summary: ${chatHistory.getLastSummary()?.content}`);
diff --git a/examples/cloud/chat.ts b/examples/cloud/chat.ts
index d6fdce572..c39224ae0 100644
--- a/examples/cloud/chat.ts
+++ b/examples/cloud/chat.ts
@@ -18,7 +18,7 @@ async function main() {
 
   while (true) {
     const query = await rl.question("User: ");
-    const stream = await chatEngine.chat({ message: query, stream: true });
+    const stream = await chatEngine.chat({ message: query }, true);
     for await (const chunk of stream) {
       process.stdout.write(chunk.response);
     }
diff --git a/examples/multimodal/context.ts b/examples/multimodal/context.ts
index c4a28646c..bfe0c7daa 100644
--- a/examples/multimodal/context.ts
+++ b/examples/multimodal/context.ts
@@ -1,4 +1,5 @@
 // call pnpm tsx multimodal/load.ts first to init the storage
+import { extractText } from "@llamaindex/core/utils";
 import {
   ContextChatEngine,
   NodeWithScore,
@@ -25,8 +26,9 @@ Settings.callbackManager.on("retrieve-end", (event) => {
   const textNodes = nodes.filter(
     (node: NodeWithScore) => node.node.type === ObjectType.TEXT,
   );
+  const text = extractText(query);
   console.log(
-    `Retrieved ${textNodes.length} text nodes and ${imageNodes.length} image nodes for query: ${query}`,
+    `Retrieved ${textNodes.length} text nodes and ${imageNodes.length} image nodes for query: ${text}`,
   );
 });
 
diff --git a/examples/multimodal/rag.ts b/examples/multimodal/rag.ts
index 8ac66ffa8..bbe8a6296 100644
--- a/examples/multimodal/rag.ts
+++ b/examples/multimodal/rag.ts
@@ -1,3 +1,4 @@
+import { extractText } from "@llamaindex/core/utils";
 import {
   getResponseSynthesizer,
   OpenAI,
@@ -16,7 +17,8 @@ Settings.llm = new OpenAI({ model: "gpt-4-turbo", maxTokens: 512 });
 // Update callbackManager
 Settings.callbackManager.on("retrieve-end", (event) => {
   const { nodes, query } = event.detail;
-  console.log(`Retrieved ${nodes.length} nodes for query: ${query}`);
+  const text = extractText(query);
+  console.log(`Retrieved ${nodes.length} nodes for query: ${text}`);
 });
 
 async function main() {
diff --git a/packages/autotool/examples/02_nextjs/actions.ts b/packages/autotool/examples/02_nextjs/actions.ts
index 3c5d12a48..b6738a230 100644
--- a/packages/autotool/examples/02_nextjs/actions.ts
+++ b/packages/autotool/examples/02_nextjs/actions.ts
@@ -14,10 +14,12 @@ export async function chatWithAI(message: string): Promise<ReactNode> {
   const uiStream = createStreamableUI();
   runWithStreamableUI(uiStream, () =>
     agent
-      .chat({
-        stream: true,
-        message,
-      })
+      .chat(
+        {
+          message,
+        },
+        true,
+      )
       .then(async (responseStream) => {
         return responseStream.pipeTo(
           new WritableStream({
diff --git a/packages/core/package.json b/packages/core/package.json
index 403fd6073..b7f799d8a 100644
--- a/packages/core/package.json
+++ b/packages/core/package.json
@@ -199,6 +199,34 @@
         "types": "./dist/response-synthesizers/index.d.ts",
         "default": "./dist/response-synthesizers/index.js"
       }
+    },
+    "./chat-engine": {
+      "require": {
+        "types": "./dist/chat-engine/index.d.cts",
+        "default": "./dist/chat-engine/index.cjs"
+      },
+      "import": {
+        "types": "./dist/chat-engine/index.d.ts",
+        "default": "./dist/chat-engine/index.js"
+      },
+      "default": {
+        "types": "./dist/chat-engine/index.d.ts",
+        "default": "./dist/chat-engine/index.js"
+      }
+    },
+    "./retriever": {
+      "require": {
+        "types": "./dist/retriever/index.d.cts",
+        "default": "./dist/retriever/index.cjs"
+      },
+      "import": {
+        "types": "./dist/retriever/index.d.ts",
+        "default": "./dist/retriever/index.js"
+      },
+      "default": {
+        "types": "./dist/retriever/index.d.ts",
+        "default": "./dist/retriever/index.js"
+      }
     }
   },
   "files": [
diff --git a/packages/core/src/chat-engine/index.ts b/packages/core/src/chat-engine/index.ts
new file mode 100644
index 000000000..07d7b1223
--- /dev/null
+++ b/packages/core/src/chat-engine/index.ts
@@ -0,0 +1,28 @@
+import type { ChatMessage, MessageContent } from "../llms";
+import type { BaseMemory } from "../memory";
+import { EngineResponse } from "../schema";
+
+export interface ChatEngineParams<
+  AdditionalMessageOptions extends object = object,
+> {
+  message: MessageContent;
+  /**
+   * Optional chat history if you want to customize the chat history.
+   */
+  chatHistory?:
+    | ChatMessage<AdditionalMessageOptions>[]
+    | BaseMemory<AdditionalMessageOptions>;
+}
+
+export abstract class BaseChatEngine {
+  abstract chat(
+    params: ChatEngineParams,
+    stream?: false,
+  ): Promise<EngineResponse>;
+  abstract chat(
+    params: ChatEngineParams,
+    stream: true,
+  ): Promise<AsyncIterable<EngineResponse>>;
+
+  abstract chatHistory: ChatMessage[] | Promise<ChatMessage[]>;
+}
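
A usage sketch of the new `BaseChatEngine` surface (assuming an LLM is configured via `Settings.llm`), showing both overloads of `chat` plus the `chatHistory` accessor that may now be a `Promise`:

```ts
import { SimpleChatEngine } from "llamaindex";

const engine = new SimpleChatEngine();

// Non-streaming: omit the second argument (or pass false) to get one EngineResponse.
const response = await engine.chat({ message: "Hello!" });
console.log(response.response);

// Streaming: pass true as the second argument to get an async iterable of chunks.
const stream = await engine.chat({ message: "Tell me more." }, true);
for await (const chunk of stream) {
  process.stdout.write(chunk.response);
}

// chatHistory now yields ChatMessage[] (possibly behind a Promise), not a BaseMemory.
const history = await engine.chatHistory;
console.log(`history length: ${history.length}`);
```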
diff --git a/packages/core/src/global/settings/callback-manager.ts b/packages/core/src/global/settings/callback-manager.ts
index 52ceba772..bce2f7237 100644
--- a/packages/core/src/global/settings/callback-manager.ts
+++ b/packages/core/src/global/settings/callback-manager.ts
@@ -11,6 +11,7 @@ import type {
   SynthesizeEndEvent,
   SynthesizeStartEvent,
 } from "../../response-synthesizers";
+import type { RetrieveEndEvent, RetrieveStartEvent } from "../../retriever";
 import { TextNode } from "../../schema";
 import { EventCaller, getEventCaller } from "../../utils";
 import type { UUID } from "../type";
@@ -69,6 +70,8 @@ export interface LlamaIndexEventMaps {
   "query-end": QueryEndEvent;
   "synthesize-start": SynthesizeStartEvent;
   "synthesize-end": SynthesizeEndEvent;
+  "retrieve-start": RetrieveStartEvent;
+  "retrieve-end": RetrieveEndEvent;
 }
 
 export class LlamaIndexCustomEvent<T = any> extends CustomEvent<T> {
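
Since `retrieve-start` and `retrieve-end` are now typed in core, subscribers receive an `id` and a `QueryBundle` rather than raw message content. A sketch mirroring the updated multimodal examples:

```ts
import { extractText } from "@llamaindex/core/utils";
import { Settings } from "llamaindex";

Settings.callbackManager.on("retrieve-start", (event) => {
  const { id, query } = event.detail;
  console.log(`[${id}] retrieving for query: ${extractText(query)}`);
});

Settings.callbackManager.on("retrieve-end", (event) => {
  const { id, query, nodes } = event.detail;
  console.log(`[${id}] retrieved ${nodes.length} nodes for query: ${extractText(query)}`);
});
```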
diff --git a/packages/core/src/query-engine/base.ts b/packages/core/src/query-engine/base.ts
index 6b9d0b08e..db079b284 100644
--- a/packages/core/src/query-engine/base.ts
+++ b/packages/core/src/query-engine/base.ts
@@ -2,7 +2,7 @@ import { randomUUID } from "@llamaindex/env";
 import { Settings } from "../global";
 import type { MessageContent } from "../llms";
 import { PromptMixin } from "../prompts";
-import { EngineResponse } from "../schema";
+import { EngineResponse, type NodeWithScore } from "../schema";
 import { wrapEventCaller } from "../utils";
 
 /**
@@ -28,6 +28,12 @@ export abstract class BaseQueryEngine extends PromptMixin {
     super();
   }
 
+  async retrieve(params: QueryType): Promise<NodeWithScore[]> {
+    throw new Error(
+      "This query engine does not support retrieve, use query directly",
+    );
+  }
+
   query(
     strOrQueryBundle: QueryType,
     stream: true,
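
`BaseQueryEngine` now ships a `retrieve` method that subclasses such as `RetrieverQueryEngine` override (see below); the base implementation throws for engines without a retriever. A small sketch, assuming `OPENAI_API_KEY` is set for the default embedding model:

```ts
import { Document, VectorStoreIndex } from "llamaindex";

const index = await VectorStoreIndex.fromDocuments([
  new Document({ text: "Chat engines and retrievers now live in @llamaindex/core." }),
]);
const queryEngine = index.asQueryEngine();

// RetrieverQueryEngine overrides retrieve(), so nodes can be fetched without
// running the full synthesis step of query().
const nodes = await queryEngine.retrieve("Where do retrievers live now?");
console.log(`retrieved ${nodes.length} nodes`);
```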
diff --git a/packages/core/src/retriever/index.ts b/packages/core/src/retriever/index.ts
new file mode 100644
index 000000000..176a98c82
--- /dev/null
+++ b/packages/core/src/retriever/index.ts
@@ -0,0 +1,112 @@
+import { randomUUID } from "@llamaindex/env";
+import { Settings } from "../global";
+import type { MessageContent } from "../llms";
+import { PromptMixin } from "../prompts";
+import type { QueryBundle, QueryType } from "../query-engine";
+import { BaseNode, IndexNode, type NodeWithScore, ObjectType } from "../schema";
+
+export type RetrieveParams = {
+  query: MessageContent;
+  preFilters?: unknown;
+};
+
+export type RetrieveStartEvent = {
+  id: string;
+  query: QueryBundle;
+};
+
+export type RetrieveEndEvent = {
+  id: string;
+  query: QueryBundle;
+  nodes: NodeWithScore[];
+};
+
+export abstract class BaseRetriever extends PromptMixin {
+  objectMap: Map<string, unknown> = new Map();
+
+  protected _updatePrompts() {}
+  protected _getPrompts() {
+    return {};
+  }
+
+  protected _getPromptModules() {
+    return {};
+  }
+
+  protected constructor() {
+    super();
+  }
+
+  public async retrieve(params: QueryType): Promise<NodeWithScore[]> {
+    const cb = Settings.callbackManager;
+    const queryBundle = typeof params === "string" ? { query: params } : params;
+    const id = randomUUID();
+    cb.dispatchEvent("retrieve-start", { id, query: queryBundle });
+    let response = await this._retrieve(queryBundle);
+    response = await this._handleRecursiveRetrieval(queryBundle, response);
+    cb.dispatchEvent("retrieve-end", {
+      id,
+      query: queryBundle,
+      nodes: response,
+    });
+    return response;
+  }
+
+  abstract _retrieve(params: QueryBundle): Promise<NodeWithScore[]>;
+
+  async _handleRecursiveRetrieval(
+    params: QueryBundle,
+    nodes: NodeWithScore[],
+  ): Promise<NodeWithScore[]> {
+    const retrievedNodes: NodeWithScore[] = [];
+    for (const { node, score = 1.0 } of nodes) {
+      if (node.type === ObjectType.INDEX) {
+        const indexNode = node as IndexNode;
+        const object = this.objectMap.get(indexNode.indexId);
+        if (object !== undefined) {
+          retrievedNodes.push(
+            ...this._retrieveFromObject(object, params, score),
+          );
+        } else {
+          retrievedNodes.push({ node, score });
+        }
+      } else {
+        retrievedNodes.push({ node, score });
+      }
+    }
+    return retrievedNodes;
+  }
+
+  _retrieveFromObject(
+    object: unknown,
+    queryBundle: QueryBundle,
+    score: number,
+  ): NodeWithScore[] {
+    if (object == null) {
+      throw new TypeError("Object is not retrievable");
+    }
+    if (typeof object !== "object") {
+      throw new TypeError("Object is not retrievable");
+    }
+    if ("node" in object && object.node instanceof BaseNode) {
+      return [
+        {
+          node: object.node,
+          score:
+            "score" in object && typeof object.score === "number"
+              ? object.score
+              : score,
+        },
+      ];
+    }
+    if (object instanceof BaseNode) {
+      return [{ node: object, score }];
+    } else {
+      // todo: support other types
+      // BaseQueryEngine
+      // BaseRetriever
+      // QueryComponent
+      throw new TypeError("Object is not retrievable");
+    }
+  }
+}
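
Retriever implementations now extend the class rather than implement an interface: the public `retrieve` wrapper dispatches `retrieve-start`/`retrieve-end` and handles recursive `IndexNode` resolution, so subclasses only supply `_retrieve`. A minimal sketch (the class name and keyword scoring are illustrative, not part of this patch):

```ts
import type { QueryBundle } from "@llamaindex/core/query-engine";
import { BaseRetriever } from "@llamaindex/core/retriever";
import { Document, type NodeWithScore } from "@llamaindex/core/schema";

class KeywordMatchRetriever extends BaseRetriever {
  constructor(private nodes: Document[]) {
    super();
  }

  // Callers go through retrieve(), which wraps this hook with the
  // retrieve-start/retrieve-end events and recursive retrieval handling.
  async _retrieve(query: QueryBundle): Promise<NodeWithScore[]> {
    const needle =
      typeof query.query === "string" ? query.query.toLowerCase() : "";
    return this.nodes
      .filter((node) => node.getText().toLowerCase().includes(needle))
      .map((node) => ({ node, score: 1 }));
  }
}
```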
diff --git a/packages/llamaindex/e2e/examples/cloudflare-worker-agent/src/index.ts b/packages/llamaindex/e2e/examples/cloudflare-worker-agent/src/index.ts
index 8a283cc6b..ce80b00b2 100644
--- a/packages/llamaindex/e2e/examples/cloudflare-worker-agent/src/index.ts
+++ b/packages/llamaindex/e2e/examples/cloudflare-worker-agent/src/index.ts
@@ -11,10 +11,12 @@ export default {
       tools: [],
     });
     console.log(1);
-    const responseStream = await agent.chat({
-      stream: true,
-      message: "Hello? What is the weather today?",
-    });
+    const responseStream = await agent.chat(
+      {
+        message: "Hello? What is the weather today?",
+      },
+      true,
+    );
     console.log(2);
     const textEncoder = new TextEncoder();
     const response = responseStream.pipeThrough<Uint8Array>(
diff --git a/packages/llamaindex/e2e/examples/nextjs-agent/src/actions/index.tsx b/packages/llamaindex/e2e/examples/nextjs-agent/src/actions/index.tsx
index b71e52a92..f648a8d9d 100644
--- a/packages/llamaindex/e2e/examples/nextjs-agent/src/actions/index.tsx
+++ b/packages/llamaindex/e2e/examples/nextjs-agent/src/actions/index.tsx
@@ -10,11 +10,13 @@ export async function chatWithAgent(
   const agent = new OpenAIAgent({
     tools: [],
   });
-  const responseStream = await agent.chat({
-    stream: true,
-    message: question,
-    chatHistory: prevMessages,
-  });
+  const responseStream = await agent.chat(
+    {
+      message: question,
+      chatHistory: prevMessages,
+    },
+    true,
+  );
   const uiStream = createStreamableUI(<div>loading...</div>);
   responseStream
     .pipeTo(
diff --git a/packages/llamaindex/e2e/node/openai.e2e.ts b/packages/llamaindex/e2e/node/openai.e2e.ts
index 401939085..eb9918eef 100644
--- a/packages/llamaindex/e2e/node/openai.e2e.ts
+++ b/packages/llamaindex/e2e/node/openai.e2e.ts
@@ -322,10 +322,12 @@ await test("agent stream", async (t) => {
       tools: [sumNumbersTool, divideNumbersTool],
     });
 
-    const stream = await agent.chat({
-      message: "Divide 16 by 2 then add 20",
-      stream: true,
-    });
+    const stream = await agent.chat(
+      {
+        message: "Divide 16 by 2 then add 20",
+      },
+      true,
+    );
 
     let message = "";
 
diff --git a/packages/llamaindex/e2e/node/react.e2e.ts b/packages/llamaindex/e2e/node/react.e2e.ts
index c0bb8c46c..58170a57f 100644
--- a/packages/llamaindex/e2e/node/react.e2e.ts
+++ b/packages/llamaindex/e2e/node/react.e2e.ts
@@ -20,7 +20,6 @@ await test("react agent", async (t) => {
       tools: [getWeatherTool],
     });
     const response = await agent.chat({
-      stream: false,
       message: "What is the weather like in San Francisco?",
     });
 
@@ -35,10 +34,12 @@ await test("react agent stream", async (t) => {
       tools: [getWeatherTool],
     });
 
-    const stream = await agent.chat({
-      stream: true,
-      message: "What is the weather like in San Francisco?",
-    });
+    const stream = await agent.chat(
+      {
+        message: "What is the weather like in San Francisco?",
+      },
+      true,
+    );
 
     let content = "";
     for await (const response of stream) {
diff --git a/packages/llamaindex/src/Retriever.ts b/packages/llamaindex/src/Retriever.ts
deleted file mode 100644
index b7ef4cf04..000000000
--- a/packages/llamaindex/src/Retriever.ts
+++ /dev/null
@@ -1,20 +0,0 @@
-import type { NodeWithScore } from "@llamaindex/core/schema";
-import type { ServiceContext } from "./ServiceContext.js";
-import type { MessageContent } from "./index.edge.js";
-
-export type RetrieveParams = {
-  query: MessageContent;
-  preFilters?: unknown;
-};
-
-/**
- * Retrievers retrieve the nodes that most closely match our query in similarity.
- */
-export interface BaseRetriever {
-  retrieve(params: RetrieveParams): Promise<NodeWithScore[]>;
-
-  /**
-   * @deprecated to be deprecated soon
-   */
-  serviceContext?: ServiceContext | undefined;
-}
diff --git a/packages/llamaindex/src/agent/anthropic.ts b/packages/llamaindex/src/agent/anthropic.ts
index 8f17b360d..e8c827b68 100644
--- a/packages/llamaindex/src/agent/anthropic.ts
+++ b/packages/llamaindex/src/agent/anthropic.ts
@@ -1,9 +1,6 @@
+import type { ChatEngineParams } from "@llamaindex/core/chat-engine";
+import type { EngineResponse } from "@llamaindex/core/schema";
 import { Settings } from "../Settings.js";
-import type {
-  ChatEngineParamsNonStreaming,
-  ChatEngineParamsStreaming,
-  EngineResponse,
-} from "../index.edge.js";
 import { Anthropic } from "../llm/anthropic.js";
 import { LLMAgent, LLMAgentWorker, type LLMAgentParams } from "./llm.js";
 
@@ -24,12 +21,10 @@ export class AnthropicAgent extends LLMAgent {
     });
   }
 
-  async chat(params: ChatEngineParamsNonStreaming): Promise<EngineResponse>;
-  async chat(params: ChatEngineParamsStreaming): Promise<never>;
-  override async chat(
-    params: ChatEngineParamsNonStreaming | ChatEngineParamsStreaming,
-  ) {
-    if (params.stream) {
+  async chat(params: ChatEngineParams, stream?: false): Promise<EngineResponse>;
+  async chat(params: ChatEngineParams, stream: true): Promise<never>;
+  override async chat(params: ChatEngineParams, stream?: boolean) {
+    if (stream) {
       // Anthropic does support this, but looks like it's not supported in the LITS LLM
       throw new Error("Anthropic does not support streaming");
     }
diff --git a/packages/llamaindex/src/agent/base.ts b/packages/llamaindex/src/agent/base.ts
index 2715b4ffe..8141ceac4 100644
--- a/packages/llamaindex/src/agent/base.ts
+++ b/packages/llamaindex/src/agent/base.ts
@@ -1,3 +1,7 @@
+import {
+  BaseChatEngine,
+  type ChatEngineParams,
+} from "@llamaindex/core/chat-engine";
 import type {
   BaseToolWithCall,
   ChatMessage,
@@ -10,11 +14,6 @@ import { EngineResponse } from "@llamaindex/core/schema";
 import { wrapEventCaller } from "@llamaindex/core/utils";
 import { randomUUID } from "@llamaindex/env";
 import { Settings } from "../Settings.js";
-import {
-  type ChatEngine,
-  type ChatEngineParamsNonStreaming,
-  type ChatEngineParamsStreaming,
-} from "../engines/chat/index.js";
 import { consoleLogger, emptyLogger } from "../internal/logger.js";
 import { isReadableStream } from "../internal/utils.js";
 import { ObjectRetriever } from "../objects/index.js";
@@ -207,8 +206,7 @@ export abstract class AgentRunner<
   >
     ? AdditionalMessageOptions
     : never,
-> implements ChatEngine
-{
+> extends BaseChatEngine {
   readonly #llm: AI;
   readonly #tools:
     | BaseToolWithCall[]
@@ -259,6 +257,7 @@ export abstract class AgentRunner<
   protected constructor(
     params: AgentRunnerParams<AI, Store, AdditionalMessageOptions>,
   ) {
+    super();
     const { llm, chatHistory, systemPrompt, runner, tools, verbose } = params;
     this.#llm = llm;
     this.#chatHistory = chatHistory;
@@ -345,13 +344,15 @@ export abstract class AgentRunner<
     });
   }
 
-  async chat(params: ChatEngineParamsNonStreaming): Promise<EngineResponse>;
+  async chat(params: ChatEngineParams, stream?: false): Promise<EngineResponse>;
   async chat(
-    params: ChatEngineParamsStreaming,
+    params: ChatEngineParams,
+    stream: true,
   ): Promise<ReadableStream<EngineResponse>>;
   @wrapEventCaller
   async chat(
-    params: ChatEngineParamsNonStreaming | ChatEngineParamsStreaming,
+    params: ChatEngineParams,
+    stream?: boolean,
   ): Promise<EngineResponse | ReadableStream<EngineResponse>> {
     let chatHistory: ChatMessage<AdditionalMessageOptions>[] = [];
 
@@ -363,12 +364,7 @@ export abstract class AgentRunner<
         params.chatHistory as ChatMessage<AdditionalMessageOptions>[];
     }
 
-    const task = this.createTask(
-      params.message,
-      !!params.stream,
-      false,
-      chatHistory,
-    );
+    const task = this.createTask(params.message, !!stream, false, chatHistory);
     for await (const stepOutput of task) {
       // update chat history for each round
       this.#chatHistory = [...stepOutput.taskStep.context.store.messages];
diff --git a/packages/llamaindex/src/cloud/LlamaCloudIndex.ts b/packages/llamaindex/src/cloud/LlamaCloudIndex.ts
index 580189174..e3509b296 100644
--- a/packages/llamaindex/src/cloud/LlamaCloudIndex.ts
+++ b/packages/llamaindex/src/cloud/LlamaCloudIndex.ts
@@ -1,7 +1,6 @@
 import type { BaseQueryEngine } from "@llamaindex/core/query-engine";
 import type { BaseSynthesizer } from "@llamaindex/core/response-synthesizers";
 import type { Document, TransformComponent } from "@llamaindex/core/schema";
-import type { BaseRetriever } from "../Retriever.js";
 import { RetrieverQueryEngine } from "../engines/query/RetrieverQueryEngine.js";
 import type { BaseNodePostprocessor } from "../postprocessors/types.js";
 import type { CloudRetrieveParams } from "./LlamaCloudRetriever.js";
@@ -12,6 +11,7 @@ import { getAppBaseUrl, getProjectId, initService } from "./utils.js";
 
 import { PipelinesService, ProjectsService } from "@llamaindex/cloud/api";
 import { SentenceSplitter } from "@llamaindex/core/node-parser";
+import type { BaseRetriever } from "@llamaindex/core/retriever";
 import { getEnv } from "@llamaindex/env";
 import { OpenAIEmbedding } from "@llamaindex/openai";
 import { Settings } from "../Settings.js";
diff --git a/packages/llamaindex/src/cloud/LlamaCloudRetriever.ts b/packages/llamaindex/src/cloud/LlamaCloudRetriever.ts
index 3d41ae76c..ebdf0b0bb 100644
--- a/packages/llamaindex/src/cloud/LlamaCloudRetriever.ts
+++ b/packages/llamaindex/src/cloud/LlamaCloudRetriever.ts
@@ -4,11 +4,12 @@ import {
   type RetrievalParams,
   type TextNodeWithScore,
 } from "@llamaindex/cloud/api";
-import { DEFAULT_PROJECT_NAME, Settings } from "@llamaindex/core/global";
+import { DEFAULT_PROJECT_NAME } from "@llamaindex/core/global";
+import type { QueryBundle } from "@llamaindex/core/query-engine";
+import { BaseRetriever } from "@llamaindex/core/retriever";
 import type { NodeWithScore } from "@llamaindex/core/schema";
 import { jsonToNode, ObjectType } from "@llamaindex/core/schema";
-import { extractText, wrapEventCaller } from "@llamaindex/core/utils";
-import type { BaseRetriever, RetrieveParams } from "../Retriever.js";
+import { extractText } from "@llamaindex/core/utils";
 import type { ClientParams, CloudConstructorParams } from "./type.js";
 import { getProjectId, initService } from "./utils.js";
 
@@ -17,7 +18,7 @@ export type CloudRetrieveParams = Omit<
   "query" | "search_filters" | "dense_similarity_top_k"
 > & { similarityTopK?: number; filters?: MetadataFilters };
 
-export class LlamaCloudRetriever implements BaseRetriever {
+export class LlamaCloudRetriever extends BaseRetriever {
   clientParams: ClientParams;
   retrieveParams: CloudRetrieveParams;
   organizationId?: string;
@@ -42,6 +43,7 @@ export class LlamaCloudRetriever implements BaseRetriever {
   }
 
   constructor(params: CloudConstructorParams & CloudRetrieveParams) {
+    super();
     this.clientParams = { apiKey: params.apiKey, baseUrl: params.baseUrl };
     initService(this.clientParams);
     this.retrieveParams = params;
@@ -54,11 +56,7 @@ export class LlamaCloudRetriever implements BaseRetriever {
     }
   }
 
-  @wrapEventCaller
-  async retrieve({
-    query,
-    preFilters,
-  }: RetrieveParams): Promise<NodeWithScore[]> {
+  async _retrieve(query: QueryBundle): Promise<NodeWithScore[]> {
     const { data: pipelines } =
       await PipelinesService.searchPipelinesApiV1PipelinesGet({
         query: {
@@ -97,19 +95,11 @@ export class LlamaCloudRetriever implements BaseRetriever {
         body: {
           ...this.retrieveParams,
           query: extractText(query),
-          search_filters:
-            this.retrieveParams.filters ?? (preFilters as MetadataFilters),
+          search_filters: this.retrieveParams.filters as MetadataFilters,
           dense_similarity_top_k: this.retrieveParams.similarityTopK!,
         },
       });
 
-    const nodesWithScores = this.resultNodesToNodeWithScore(
-      results.retrieval_nodes,
-    );
-    Settings.callbackManager.dispatchEvent("retrieve-end", {
-      query,
-      nodes: nodesWithScores,
-    });
-    return nodesWithScores;
+    return this.resultNodesToNodeWithScore(results.retrieval_nodes);
   }
 }
diff --git a/packages/llamaindex/src/engines/chat/CondenseQuestionChatEngine.ts b/packages/llamaindex/src/engines/chat/CondenseQuestionChatEngine.ts
index 2be3eb548..4ebff2f40 100644
--- a/packages/llamaindex/src/engines/chat/CondenseQuestionChatEngine.ts
+++ b/packages/llamaindex/src/engines/chat/CondenseQuestionChatEngine.ts
@@ -1,10 +1,13 @@
+import {
+  BaseChatEngine,
+  type ChatEngineParams,
+} from "@llamaindex/core/chat-engine";
 import type { ChatMessage, LLM } from "@llamaindex/core/llms";
 import { BaseMemory, ChatMemoryBuffer } from "@llamaindex/core/memory";
 import {
   type CondenseQuestionPrompt,
   defaultCondenseQuestionPrompt,
   type ModuleRecord,
-  PromptMixin,
 } from "@llamaindex/core/prompts";
 import type { BaseQueryEngine } from "@llamaindex/core/query-engine";
 import type { EngineResponse } from "@llamaindex/core/schema";
@@ -16,11 +19,6 @@ import {
 } from "@llamaindex/core/utils";
 import type { ServiceContext } from "../../ServiceContext.js";
 import { llmFromSettingsOrContext } from "../../Settings.js";
-import type {
-  ChatEngine,
-  ChatEngineParamsNonStreaming,
-  ChatEngineParamsStreaming,
-} from "./types.js";
 
 /**
 * CondenseQuestionChatEngine is used in conjunction with an Index (for example VectorStoreIndex).
@@ -32,16 +30,16 @@ import type {
  * underlying data. It performs less well when the chat messages are not questions about the
  * data, or are very referential to previous context.
  */
-
-export class CondenseQuestionChatEngine
-  extends PromptMixin
-  implements ChatEngine
-{
+export class CondenseQuestionChatEngine extends BaseChatEngine {
   queryEngine: BaseQueryEngine;
-  chatHistory: BaseMemory;
+  memory: BaseMemory;
   llm: LLM;
   condenseMessagePrompt: CondenseQuestionPrompt;
 
+  get chatHistory() {
+    return this.memory.getMessages();
+  }
+
   constructor(init: {
     queryEngine: BaseQueryEngine;
     chatHistory: ChatMessage[];
@@ -51,7 +49,7 @@ export class CondenseQuestionChatEngine
     super();
 
     this.queryEngine = init.queryEngine;
-    this.chatHistory = new ChatMemoryBuffer({
+    this.memory = new ChatMemoryBuffer({
       chatHistory: init?.chatHistory,
     });
     this.llm = llmFromSettingsOrContext(init?.serviceContext);
@@ -88,15 +86,17 @@ export class CondenseQuestionChatEngine
     });
   }
 
+  chat(params: ChatEngineParams, stream?: false): Promise<EngineResponse>;
   chat(
-    params: ChatEngineParamsStreaming,
+    params: ChatEngineParams,
+    stream: true,
   ): Promise<AsyncIterable<EngineResponse>>;
-  chat(params: ChatEngineParamsNonStreaming): Promise<EngineResponse>;
   @wrapEventCaller
   async chat(
-    params: ChatEngineParamsStreaming | ChatEngineParamsNonStreaming,
+    params: ChatEngineParams,
+    stream = false,
   ): Promise<EngineResponse | AsyncIterable<EngineResponse>> {
-    const { message, stream } = params;
+    const { message } = params;
     const chatHistory = params.chatHistory
       ? new ChatMemoryBuffer({
           chatHistory:
@@ -104,7 +104,7 @@ export class CondenseQuestionChatEngine
               ? await params.chatHistory.getMessages()
               : params.chatHistory,
         })
-      : this.chatHistory;
+      : this.memory;
 
     const condensedQuestion = (
       await this.condenseQuestion(chatHistory, extractText(message))
@@ -140,6 +140,6 @@ export class CondenseQuestionChatEngine
   }
 
   reset() {
-    this.chatHistory.reset();
+    this.memory.reset();
   }
 }
diff --git a/packages/llamaindex/src/engines/chat/ContextChatEngine.ts b/packages/llamaindex/src/engines/chat/ContextChatEngine.ts
index 23d56a00a..26baddf9c 100644
--- a/packages/llamaindex/src/engines/chat/ContextChatEngine.ts
+++ b/packages/llamaindex/src/engines/chat/ContextChatEngine.ts
@@ -1,3 +1,7 @@
+import type {
+  BaseChatEngine,
+  ChatEngineParams,
+} from "@llamaindex/core/chat-engine";
 import type {
   ChatMessage,
   LLM,
@@ -11,6 +15,7 @@ import {
   PromptMixin,
   type PromptsRecord,
 } from "@llamaindex/core/prompts";
+import type { BaseRetriever } from "@llamaindex/core/retriever";
 import { EngineResponse, MetadataMode } from "@llamaindex/core/schema";
 import {
   extractText,
@@ -18,27 +23,25 @@ import {
   streamReducer,
   wrapEventCaller,
 } from "@llamaindex/core/utils";
-import type { BaseRetriever } from "../../Retriever.js";
 import { Settings } from "../../Settings.js";
 import type { BaseNodePostprocessor } from "../../postprocessors/index.js";
 import { DefaultContextGenerator } from "./DefaultContextGenerator.js";
-import type {
-  ChatEngine,
-  ChatEngineParamsNonStreaming,
-  ChatEngineParamsStreaming,
-  ContextGenerator,
-} from "./types.js";
+import type { ContextGenerator } from "./types.js";
 
 /**
  * ContextChatEngine uses the Index to get the appropriate context for each query.
 * The context is stored in the system prompt, and the chat history is preserved, allowing the appropriate context to be surfaced for each query.
  */
-export class ContextChatEngine extends PromptMixin implements ChatEngine {
+export class ContextChatEngine extends PromptMixin implements BaseChatEngine {
   chatModel: LLM;
-  chatHistory: BaseMemory;
+  memory: BaseMemory;
   contextGenerator: ContextGenerator & PromptMixin;
   systemPrompt?: string | undefined;
 
+  get chatHistory() {
+    return this.memory.getMessages();
+  }
+
   constructor(init: {
     retriever: BaseRetriever;
     chatModel?: LLM | undefined;
@@ -50,7 +53,7 @@ export class ContextChatEngine extends PromptMixin implements ChatEngine {
   }) {
     super();
     this.chatModel = init.chatModel ?? Settings.llm;
-    this.chatHistory = new ChatMemoryBuffer({ chatHistory: init?.chatHistory });
+    this.memory = new ChatMemoryBuffer({ chatHistory: init?.chatHistory });
     this.contextGenerator = new DefaultContextGenerator({
       retriever: init.retriever,
       contextSystemPrompt: init?.contextSystemPrompt,
@@ -79,15 +82,17 @@ export class ContextChatEngine extends PromptMixin implements ChatEngine {
     };
   }
 
+  chat(params: ChatEngineParams, stream?: false): Promise<EngineResponse>;
   chat(
-    params: ChatEngineParamsStreaming,
+    params: ChatEngineParams,
+    stream: true,
   ): Promise<AsyncIterable<EngineResponse>>;
-  chat(params: ChatEngineParamsNonStreaming): Promise<EngineResponse>;
   @wrapEventCaller
   async chat(
-    params: ChatEngineParamsStreaming | ChatEngineParamsNonStreaming,
+    params: ChatEngineParams,
+    stream = false,
   ): Promise<EngineResponse | AsyncIterable<EngineResponse>> {
-    const { message, stream } = params;
+    const { message } = params;
     const chatHistory = params.chatHistory
       ? new ChatMemoryBuffer({
           chatHistory:
@@ -95,7 +100,7 @@ export class ContextChatEngine extends PromptMixin implements ChatEngine {
               ? await params.chatHistory.getMessages()
               : params.chatHistory,
         })
-      : this.chatHistory;
+      : this.memory;
     const requestMessages = await this.prepareRequestMessages(
       message,
       chatHistory,
@@ -125,7 +130,7 @@ export class ContextChatEngine extends PromptMixin implements ChatEngine {
   }
 
   reset() {
-    this.chatHistory.reset();
+    this.memory.reset();
   }
 
   private async prepareRequestMessages(
diff --git a/packages/llamaindex/src/engines/chat/DefaultContextGenerator.ts b/packages/llamaindex/src/engines/chat/DefaultContextGenerator.ts
index 976400556..c54e7d03e 100644
--- a/packages/llamaindex/src/engines/chat/DefaultContextGenerator.ts
+++ b/packages/llamaindex/src/engines/chat/DefaultContextGenerator.ts
@@ -5,10 +5,10 @@ import {
   type ModuleRecord,
   PromptMixin,
 } from "@llamaindex/core/prompts";
+import type { BaseRetriever } from "@llamaindex/core/retriever";
 import { MetadataMode, type NodeWithScore } from "@llamaindex/core/schema";
 import { createMessageContent } from "@llamaindex/core/utils";
 import type { BaseNodePostprocessor } from "../../postprocessors/index.js";
-import type { BaseRetriever } from "../../Retriever.js";
 import type { Context, ContextGenerator } from "./types.js";
 
 export class DefaultContextGenerator
diff --git a/packages/llamaindex/src/engines/chat/SimpleChatEngine.ts b/packages/llamaindex/src/engines/chat/SimpleChatEngine.ts
index 5fba02501..d3dc9f5d0 100644
--- a/packages/llamaindex/src/engines/chat/SimpleChatEngine.ts
+++ b/packages/llamaindex/src/engines/chat/SimpleChatEngine.ts
@@ -1,3 +1,7 @@
+import type {
+  BaseChatEngine,
+  ChatEngineParams,
+} from "@llamaindex/core/chat-engine";
 import type { LLM } from "@llamaindex/core/llms";
 import { BaseMemory, ChatMemoryBuffer } from "@llamaindex/core/memory";
 import { EngineResponse } from "@llamaindex/core/schema";
@@ -7,34 +11,35 @@ import {
   wrapEventCaller,
 } from "@llamaindex/core/utils";
 import { Settings } from "../../Settings.js";
-import type {
-  ChatEngine,
-  ChatEngineParamsNonStreaming,
-  ChatEngineParamsStreaming,
-} from "./types.js";
 
 /**
  * SimpleChatEngine is the simplest possible chat engine. Useful for using your own custom prompts.
  */
 
-export class SimpleChatEngine implements ChatEngine {
-  chatHistory: BaseMemory;
+export class SimpleChatEngine implements BaseChatEngine {
+  memory: BaseMemory;
   llm: LLM;
 
+  get chatHistory() {
+    return this.memory.getMessages();
+  }
+
   constructor(init?: Partial<SimpleChatEngine>) {
-    this.chatHistory = init?.chatHistory ?? new ChatMemoryBuffer();
+    this.memory = init?.memory ?? new ChatMemoryBuffer();
     this.llm = init?.llm ?? Settings.llm;
   }
 
+  chat(params: ChatEngineParams, stream?: false): Promise<EngineResponse>;
   chat(
-    params: ChatEngineParamsStreaming,
+    params: ChatEngineParams,
+    stream: true,
   ): Promise<AsyncIterable<EngineResponse>>;
-  chat(params: ChatEngineParamsNonStreaming): Promise<EngineResponse>;
   @wrapEventCaller
   async chat(
-    params: ChatEngineParamsStreaming | ChatEngineParamsNonStreaming,
+    params: ChatEngineParams,
+    stream = false,
   ): Promise<EngineResponse | AsyncIterable<EngineResponse>> {
-    const { message, stream } = params;
+    const { message } = params;
 
     const chatHistory = params.chatHistory
       ? new ChatMemoryBuffer({
@@ -43,7 +48,7 @@ export class SimpleChatEngine implements ChatEngine {
               ? await params.chatHistory.getMessages()
               : params.chatHistory,
         })
-      : this.chatHistory;
+      : this.memory;
     chatHistory.put({ content: message, role: "user" });
 
     if (stream) {
@@ -73,6 +78,6 @@ export class SimpleChatEngine implements ChatEngine {
   }
 
   reset() {
-    this.chatHistory.reset();
+    this.memory.reset();
   }
 }
diff --git a/packages/llamaindex/src/engines/chat/types.ts b/packages/llamaindex/src/engines/chat/types.ts
index 9b3b18c0b..2c5a811d1 100644
--- a/packages/llamaindex/src/engines/chat/types.ts
+++ b/packages/llamaindex/src/engines/chat/types.ts
@@ -1,58 +1,10 @@
-import type { ChatMessage, MessageContent } from "@llamaindex/core/llms";
-import type { BaseMemory } from "@llamaindex/core/memory";
-import { EngineResponse, type NodeWithScore } from "@llamaindex/core/schema";
-
-/**
- * Represents the base parameters for ChatEngine.
- */
-export interface ChatEngineParamsBase {
-  message: MessageContent;
-  /**
-   * Optional chat history if you want to customize the chat history.
-   */
-  chatHistory?: ChatMessage[] | BaseMemory;
-  /**
-   * Optional flag to enable verbose mode.
-   * @default false
-   */
-  verbose?: boolean;
-}
-
-export interface ChatEngineParamsStreaming extends ChatEngineParamsBase {
-  stream: true;
-}
-
-export interface ChatEngineParamsNonStreaming extends ChatEngineParamsBase {
-  stream?: false | null;
-}
-
-/**
- * A ChatEngine is used to handle back and forth chats between the application and the LLM.
- */
-export interface ChatEngine<
-  // synchronous response
-  R = EngineResponse,
-  // asynchronous response
-  AR extends AsyncIterable<unknown> = AsyncIterable<R>,
-> {
-  /**
-   * Send message along with the class's current chat history to the LLM.
-   * @param params
-   */
-  chat(params: ChatEngineParamsStreaming): Promise<AR>;
-  chat(params: ChatEngineParamsNonStreaming): Promise<R>;
-
-  /**
-   * Resets the chat history so that it's empty.
-   */
-  reset(): void;
-}
+import type { ChatMessage } from "@llamaindex/core/llms";
+import type { NodeWithScore } from "@llamaindex/core/schema";
 
 export interface Context {
   message: ChatMessage;
   nodes: NodeWithScore[];
 }
-
 /**
  * A ContextGenerator is used to generate a context based on a message's text content
  */
diff --git a/packages/llamaindex/src/engines/query/RetrieverQueryEngine.ts b/packages/llamaindex/src/engines/query/RetrieverQueryEngine.ts
index aebe09dd6..ab1906e07 100644
--- a/packages/llamaindex/src/engines/query/RetrieverQueryEngine.ts
+++ b/packages/llamaindex/src/engines/query/RetrieverQueryEngine.ts
@@ -1,10 +1,11 @@
-import { BaseQueryEngine } from "@llamaindex/core/query-engine";
+import type { MessageContent } from "@llamaindex/core/llms";
+import { BaseQueryEngine, type QueryType } from "@llamaindex/core/query-engine";
 import type { BaseSynthesizer } from "@llamaindex/core/response-synthesizers";
 import { getResponseSynthesizer } from "@llamaindex/core/response-synthesizers";
+import { BaseRetriever } from "@llamaindex/core/retriever";
 import { type NodeWithScore } from "@llamaindex/core/schema";
 import { extractText } from "@llamaindex/core/utils";
 import type { BaseNodePostprocessor } from "../../postprocessors/index.js";
-import type { BaseRetriever } from "../../Retriever.js";
 
 /**
  * A query engine that uses a retriever to query an index and then synthesizes the response.
@@ -67,7 +68,10 @@ export class RetrieverQueryEngine extends BaseQueryEngine {
     };
   }
 
-  private async applyNodePostprocessors(nodes: NodeWithScore[], query: string) {
+  private async applyNodePostprocessors(
+    nodes: NodeWithScore[],
+    query: MessageContent,
+  ) {
     let nodesWithScore = nodes;
 
     for (const postprocessor of this.nodePostprocessors) {
@@ -80,12 +84,10 @@ export class RetrieverQueryEngine extends BaseQueryEngine {
     return nodesWithScore;
   }
 
-  private async retrieve(query: string) {
-    const nodes = await this.retriever.retrieve({
-      query,
-      preFilters: this.preFilters,
-    });
+  override async retrieve(query: QueryType) {
+    const nodes = await this.retriever.retrieve(query);
 
-    return await this.applyNodePostprocessors(nodes, query);
+    const messageContent = typeof query === "string" ? query : query.query;
+    return await this.applyNodePostprocessors(nodes, messageContent);
   }
 }
diff --git a/packages/llamaindex/src/index.edge.ts b/packages/llamaindex/src/index.edge.ts
index f37f249a2..a2b819430 100644
--- a/packages/llamaindex/src/index.edge.ts
+++ b/packages/llamaindex/src/index.edge.ts
@@ -1,6 +1,6 @@
 import type { AgentEndEvent, AgentStartEvent } from "./agent/types.js";
-import type { RetrievalEndEvent, RetrievalStartEvent } from "./llm/types.js";
 
+export * from "@llamaindex/core/chat-engine";
 export {
   CallbackManager,
   DEFAULT_BASE_URL,
@@ -35,12 +35,11 @@ export * from "@llamaindex/core/llms";
 export * from "@llamaindex/core/prompts";
 export * from "@llamaindex/core/query-engine";
 export * from "@llamaindex/core/response-synthesizers";
+export * from "@llamaindex/core/retriever";
 export * from "@llamaindex/core/schema";
 
 declare module "@llamaindex/core/global" {
   export interface LlamaIndexEventMaps {
-    "retrieve-start": RetrievalStartEvent;
-    "retrieve-end": RetrievalEndEvent;
     // agent events
     "agent-start": AgentStartEvent;
     "agent-end": AgentEndEvent;
@@ -66,7 +65,6 @@ export * from "./objects/index.js";
 export * from "./OutputParser.js";
 export * from "./postprocessors/index.js";
 export * from "./QuestionGenerator.js";
-export * from "./Retriever.js";
 export * from "./selectors/index.js";
 export * from "./ServiceContext.js";
 export { Settings } from "./Settings.js";
diff --git a/packages/llamaindex/src/indices/BaseIndex.ts b/packages/llamaindex/src/indices/BaseIndex.ts
index 3d5d55c2c..c5beb5d47 100644
--- a/packages/llamaindex/src/indices/BaseIndex.ts
+++ b/packages/llamaindex/src/indices/BaseIndex.ts
@@ -1,7 +1,7 @@
 import type { BaseQueryEngine } from "@llamaindex/core/query-engine";
 import type { BaseSynthesizer } from "@llamaindex/core/response-synthesizers";
+import type { BaseRetriever } from "@llamaindex/core/retriever";
 import type { BaseNode, Document } from "@llamaindex/core/schema";
-import type { BaseRetriever } from "../Retriever.js";
 import type { ServiceContext } from "../ServiceContext.js";
 import { nodeParserFromSettingsOrContext } from "../Settings.js";
 import { runTransformations } from "../ingestion/IngestionPipeline.js";
diff --git a/packages/llamaindex/src/indices/keyword/index.ts b/packages/llamaindex/src/indices/keyword/index.ts
index 6b326317a..911850616 100644
--- a/packages/llamaindex/src/indices/keyword/index.ts
+++ b/packages/llamaindex/src/indices/keyword/index.ts
@@ -5,7 +5,6 @@ import type {
   NodeWithScore,
 } from "@llamaindex/core/schema";
 import { MetadataMode } from "@llamaindex/core/schema";
-import type { BaseRetriever, RetrieveParams } from "../../Retriever.js";
 import type { ServiceContext } from "../../ServiceContext.js";
 import { serviceContextFromDefaults } from "../../ServiceContext.js";
 import { RetrieverQueryEngine } from "../../engines/query/index.js";
@@ -29,7 +28,11 @@ import {
   type KeywordExtractPrompt,
   type QueryKeywordExtractPrompt,
 } from "@llamaindex/core/prompts";
-import type { BaseQueryEngine } from "@llamaindex/core/query-engine";
+import type {
+  BaseQueryEngine,
+  QueryBundle,
+} from "@llamaindex/core/query-engine";
+import { BaseRetriever } from "@llamaindex/core/retriever";
 import { extractText } from "@llamaindex/core/utils";
 import { llmFromSettingsOrContext } from "../../Settings.js";
 
@@ -48,7 +51,7 @@ export enum KeywordTableRetrieverMode {
 }
 
 // Base Keyword Table Retriever
-abstract class BaseKeywordTableRetriever implements BaseRetriever {
+abstract class BaseKeywordTableRetriever extends BaseRetriever {
   protected index: KeywordTableIndex;
   protected indexStruct: KeywordTable;
   protected docstore: BaseDocumentStore;
@@ -72,6 +75,7 @@ abstract class BaseKeywordTableRetriever implements BaseRetriever {
     maxKeywordsPerQuery: number;
     numChunksPerQuery: number;
   }) {
+    super();
     this.index = index;
     this.indexStruct = index.indexStruct;
     this.docstore = index.docStore;
@@ -87,7 +91,7 @@ abstract class BaseKeywordTableRetriever implements BaseRetriever {
 
   abstract getKeywords(query: string): Promise<string[]>;
 
-  async retrieve({ query }: RetrieveParams): Promise<NodeWithScore[]> {
+  async _retrieve(query: QueryBundle): Promise<NodeWithScore[]> {
     const keywords = await this.getKeywords(extractText(query));
     const chunkIndicesCount: { [key: string]: number } = {};
     const filteredKeywords = keywords.filter((keyword) =>
diff --git a/packages/llamaindex/src/indices/summary/index.ts b/packages/llamaindex/src/indices/summary/index.ts
index 375af4a6a..c449a1297 100644
--- a/packages/llamaindex/src/indices/summary/index.ts
+++ b/packages/llamaindex/src/indices/summary/index.ts
@@ -2,16 +2,17 @@ import {
   type ChoiceSelectPrompt,
   defaultChoiceSelectPrompt,
 } from "@llamaindex/core/prompts";
+import type { QueryBundle } from "@llamaindex/core/query-engine";
 import type { BaseSynthesizer } from "@llamaindex/core/response-synthesizers";
 import { getResponseSynthesizer } from "@llamaindex/core/response-synthesizers";
+import { BaseRetriever } from "@llamaindex/core/retriever";
 import type {
   BaseNode,
   Document,
   NodeWithScore,
 } from "@llamaindex/core/schema";
-import { extractText, wrapEventCaller } from "@llamaindex/core/utils";
+import { extractText } from "@llamaindex/core/utils";
 import _ from "lodash";
-import type { BaseRetriever, RetrieveParams } from "../../Retriever.js";
 import type { ServiceContext } from "../../ServiceContext.js";
 import {
   llmFromSettingsOrContext,
@@ -279,15 +280,15 @@ export type ListRetrieverMode = SummaryRetrieverMode;
 /**
  * Simple retriever for SummaryIndex that returns all nodes
  */
-export class SummaryIndexRetriever implements BaseRetriever {
+export class SummaryIndexRetriever extends BaseRetriever {
   index: SummaryIndex;
 
   constructor(index: SummaryIndex) {
+    super();
     this.index = index;
   }
 
-  @wrapEventCaller
-  async retrieve({ query }: RetrieveParams): Promise<NodeWithScore[]> {
+  async _retrieve(queryBundle: QueryBundle): Promise<NodeWithScore[]> {
     const nodeIds = this.index.indexStruct.nodes;
     const nodes = await this.index.docStore.getNodes(nodeIds);
     return nodes.map((node) => ({
@@ -300,7 +301,7 @@ export class SummaryIndexRetriever implements BaseRetriever {
 /**
  * LLM retriever for SummaryIndex which lets you select the most relevant chunks.
  */
-export class SummaryIndexLLMRetriever implements BaseRetriever {
+export class SummaryIndexLLMRetriever extends BaseRetriever {
   index: SummaryIndex;
   choiceSelectPrompt: ChoiceSelectPrompt;
   choiceBatchSize: number;
@@ -317,6 +318,7 @@ export class SummaryIndexLLMRetriever implements BaseRetriever {
     parseChoiceSelectAnswerFn?: ChoiceSelectParserFunction,
     serviceContext?: ServiceContext,
   ) {
+    super();
     this.index = index;
     this.choiceSelectPrompt = choiceSelectPrompt || defaultChoiceSelectPrompt;
     this.choiceBatchSize = choiceBatchSize;
@@ -326,7 +328,7 @@ export class SummaryIndexLLMRetriever implements BaseRetriever {
     this.serviceContext = serviceContext || index.serviceContext;
   }
 
-  async retrieve({ query }: RetrieveParams): Promise<NodeWithScore[]> {
+  async _retrieve(query: QueryBundle): Promise<NodeWithScore[]> {
     const nodeIds = this.index.indexStruct.nodes;
     const results: NodeWithScore[] = [];
 
diff --git a/packages/llamaindex/src/indices/vectorStore/index.ts b/packages/llamaindex/src/indices/vectorStore/index.ts
index c8a5bbd9a..8b88bbe41 100644
--- a/packages/llamaindex/src/indices/vectorStore/index.ts
+++ b/packages/llamaindex/src/indices/vectorStore/index.ts
@@ -2,9 +2,10 @@ import {
   DEFAULT_SIMILARITY_TOP_K,
   type BaseEmbedding,
 } from "@llamaindex/core/embeddings";
-import { Settings } from "@llamaindex/core/global";
 import type { MessageContent } from "@llamaindex/core/llms";
+import type { QueryBundle } from "@llamaindex/core/query-engine";
 import type { BaseSynthesizer } from "@llamaindex/core/response-synthesizers";
+import { BaseRetriever } from "@llamaindex/core/retriever";
 import {
   ImageNode,
   ModalityType,
@@ -14,8 +15,6 @@ import {
   type Document,
   type NodeWithScore,
 } from "@llamaindex/core/schema";
-import { wrapEventCaller } from "@llamaindex/core/utils";
-import type { BaseRetriever, RetrieveParams } from "../../Retriever.js";
 import type { ServiceContext } from "../../ServiceContext.js";
 import { nodeParserFromSettingsOrContext } from "../../Settings.js";
 import { RetrieverQueryEngine } from "../../engines/query/RetrieverQueryEngine.js";
@@ -388,7 +387,7 @@ export type VectorIndexRetrieverOptions = {
   filters?: MetadataFilters;
 };
 
-export class VectorIndexRetriever implements BaseRetriever {
+export class VectorIndexRetriever extends BaseRetriever {
   index: VectorStoreIndex;
   topK: TopKMap;
 
@@ -401,6 +400,7 @@ export class VectorIndexRetriever implements BaseRetriever {
     topK,
     filters,
   }: VectorIndexRetrieverOptions) {
+    super();
     this.index = index;
     this.serviceContext = this.index.serviceContext;
     this.topK = topK ?? {
@@ -417,32 +417,17 @@ export class VectorIndexRetriever implements BaseRetriever {
     this.topK[ModalityType.TEXT] = similarityTopK;
   }
 
-  @wrapEventCaller
-  async retrieve({
-    query,
-    preFilters,
-  }: RetrieveParams): Promise<NodeWithScore[]> {
-    Settings.callbackManager.dispatchEvent("retrieve-start", {
-      query,
-    });
+  async _retrieve(params: QueryBundle): Promise<NodeWithScore[]> {
+    const { query } = params;
     const vectorStores = this.index.vectorStores;
     let nodesWithScores: NodeWithScore[] = [];
 
     for (const type in vectorStores) {
       const vectorStore: VectorStore = vectorStores[type as ModalityType]!;
       nodesWithScores = nodesWithScores.concat(
-        await this.retrieveQuery(
-          query,
-          type as ModalityType,
-          vectorStore,
-          preFilters as MetadataFilters,
-        ),
+        await this.retrieveQuery(query, type as ModalityType, vectorStore),
       );
     }
-    Settings.callbackManager.dispatchEvent("retrieve-end", {
-      query,
-      nodes: nodesWithScores,
-    });
     return nodesWithScores;
   }
 
diff --git a/packages/llamaindex/src/llm/types.ts b/packages/llamaindex/src/llm/types.ts
index c947d80c4..e69de29bb 100644
--- a/packages/llamaindex/src/llm/types.ts
+++ b/packages/llamaindex/src/llm/types.ts
@@ -1,10 +0,0 @@
-import type { MessageContent } from "@llamaindex/core/llms";
-import type { NodeWithScore } from "@llamaindex/core/schema";
-
-export type RetrievalStartEvent = {
-  query: MessageContent;
-};
-export type RetrievalEndEvent = {
-  query: MessageContent;
-  nodes: NodeWithScore[];
-};
diff --git a/packages/llamaindex/src/objects/base.ts b/packages/llamaindex/src/objects/base.ts
index 5737a351f..eb7cb0083 100644
--- a/packages/llamaindex/src/objects/base.ts
+++ b/packages/llamaindex/src/objects/base.ts
@@ -1,8 +1,8 @@
 import type { BaseTool, MessageContent } from "@llamaindex/core/llms";
+import { BaseRetriever } from "@llamaindex/core/retriever";
 import type { BaseNode, Metadata } from "@llamaindex/core/schema";
 import { TextNode } from "@llamaindex/core/schema";
 import { extractText } from "@llamaindex/core/utils";
-import type { BaseRetriever } from "../Retriever.js";
 import type { VectorStoreIndex } from "../indices/vectorStore/index.js";
 
 // Assuming that necessary interfaces and classes (like OT, TextNode, BaseNode, etc.) are defined elsewhere
@@ -49,9 +49,6 @@ export abstract class BaseObjectNodeMapping {
 
 // You will need to implement specific subclasses of BaseObjectNodeMapping as per your project requirements.
 
-// todo: multimodal support
-type QueryType = MessageContent;
-
 export class ObjectRetriever<T = unknown> {
   _retriever: BaseRetriever;
   _objectNodeMapping: BaseObjectNodeMapping;
@@ -70,7 +67,7 @@ export class ObjectRetriever<T = unknown> {
   }
 
   // Translating the retrieve method
-  async retrieve(strOrQueryBundle: QueryType): Promise<T[]> {
+  async retrieve(strOrQueryBundle: MessageContent): Promise<T[]> {
     const nodes = await this.retriever.retrieve({
       query: extractText(strOrQueryBundle),
     });
-- 
GitLab