Unverified commit 359fd330, authored by Alex Yang, committed by GitHub
refactor(core): move `ContextChatEngine` and `SimpleChatEngine` (#1401)

parent efb7e1b8
---
"@llamaindex/core": patch
"llamaindex": patch
---
refactor(core): move `ContextChatEngine` and `SimpleChatEngine`
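For consumers, the practical effect is a new canonical import path: both engines now live in @llamaindex/core/chat-engine, and (as the last hunk below shows) the llamaindex umbrella package re-exports them, so existing imports keep working. A minimal sketch of the two equivalent import styles:

// New canonical location after this refactor:
import { ContextChatEngine, SimpleChatEngine } from "@llamaindex/core/chat-engine";

// Existing code is unaffected: the umbrella package re-exports the same classes.
import { SimpleChatEngine as ReExportedSimpleChatEngine } from "llamaindex";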
@@ -2,9 +2,9 @@ import { ClientMDXContent } from "@/components/mdx";
 import { BotMessage } from "@/components/message";
 import { Skeleton } from "@/components/ui/skeleton";
 import { LlamaCloudRetriever } from "@/deps/cloud";
+import { ContextChatEngine } from "@llamaindex/core/chat-engine";
 import { Settings } from "@llamaindex/core/global";
 import { ChatMessage } from "@llamaindex/core/llms";
-import { RetrieverQueryEngine } from "@llamaindex/core/query-engine";
 import { OpenAI } from "@llamaindex/openai";
 import { createAI, createStreamableUI, getMutableAIState } from "ai/rsc";
 import { ReactNode } from "react";
@@ -50,7 +50,7 @@ export const AIProvider = createAI({
   actions: {
     query: async (message: string): Promise<UIMessage> => {
       "use server";
-      const queryEngine = new RetrieverQueryEngine(retriever);
+      const chatEngine = new ContextChatEngine({ retriever });
       const id = Date.now();
       const aiState = getMutableAIState<typeof AIProvider>();
@@ -73,10 +73,12 @@ export const AIProvider = createAI({
       );
       runAsyncFnWithoutBlocking(async () => {
-        const response = await queryEngine.query({
-          query: message,
+        const response = await chatEngine.chat({
+          message,
+          chatHistory: aiState.get().messages,
           stream: true,
         });
         let content = "";
         for await (const { delta } of response) {
...
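The same call pattern works outside of ai/rsc. A minimal streaming sketch, assuming `retriever` is any already-constructed BaseRetriever (for example, one obtained from a vector store index):

import { ContextChatEngine } from "@llamaindex/core/chat-engine";

// `retriever` is assumed to exist in scope (any BaseRetriever implementation).
const chatEngine = new ContextChatEngine({ retriever });

// `stream: true` selects the streaming overload, so chat() resolves to an
// async iterable of response chunks rather than a single response.
const stream = await chatEngine.chat({
  message: "What changed in this refactor?",
  chatHistory: [],
  stream: true,
});

let content = "";
for await (const { delta } of stream) {
  content += delta;
}
console.log(content);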
@@ -3,7 +3,7 @@
 import {
   BaseChatEngine,
   type NonStreamingChatEngineParams,
   type StreamingChatEngineParams,
-} from "../chat-engine";
+} from "../chat-engine/base";
 import { wrapEventCaller } from "../decorator";
 import { Settings } from "../global";
 import type {
...
import type { ChatMessage, MessageContent } from "../llms";
import type { BaseMemory } from "../memory";
import { EngineResponse } from "../schema";

export interface BaseChatEngineParams<
  AdditionalMessageOptions extends object = object,
> {
  message: MessageContent;
  /**
   * Optional chat history if you want to customize the chat history.
   */
  chatHistory?:
    | ChatMessage<AdditionalMessageOptions>[]
    | BaseMemory<AdditionalMessageOptions>;
}

export interface StreamingChatEngineParams<
  AdditionalMessageOptions extends object = object,
> extends BaseChatEngineParams<AdditionalMessageOptions> {
  stream: true;
}

export interface NonStreamingChatEngineParams<
  AdditionalMessageOptions extends object = object,
> extends BaseChatEngineParams<AdditionalMessageOptions> {
  stream?: false;
}

export abstract class BaseChatEngine {
  abstract chat(params: NonStreamingChatEngineParams): Promise<EngineResponse>;
  abstract chat(
    params: StreamingChatEngineParams,
  ): Promise<AsyncIterable<EngineResponse>>;

  abstract chatHistory: ChatMessage[] | Promise<ChatMessage[]>;
}
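This base module (re-exported below from "./base") is what makes the `stream` flag type-safe: `stream: true` matches StreamingChatEngineParams and narrows the return type to AsyncIterable<EngineResponse>, while omitting it (or passing `stream: false`) matches NonStreamingChatEngineParams and returns a single EngineResponse. A caller-side sketch against any concrete engine:

import type { BaseChatEngine } from "@llamaindex/core/chat-engine";
import type { EngineResponse } from "@llamaindex/core/schema";

async function ask(engine: BaseChatEngine, question: string) {
  // Non-streaming overload: resolves to one EngineResponse.
  const full: EngineResponse = await engine.chat({ message: question });

  // Streaming overload: resolves to AsyncIterable<EngineResponse>.
  const chunks = await engine.chat({ message: question, stream: true });
  for await (const chunk of chunks) {
    process.stdout.write(chunk.delta ?? "");
  }
  return full;
}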
-import type {
-  BaseChatEngine,
-  NonStreamingChatEngineParams,
-  StreamingChatEngineParams,
-} from "@llamaindex/core/chat-engine";
-import { wrapEventCaller } from "@llamaindex/core/decorator";
-import type {
-  ChatMessage,
-  LLM,
-  MessageContent,
-  MessageType,
-} from "@llamaindex/core/llms";
-import { BaseMemory, ChatMemoryBuffer } from "@llamaindex/core/memory";
-import type { BaseNodePostprocessor } from "@llamaindex/core/postprocessor";
+import { wrapEventCaller } from "../decorator";
+import { Settings } from "../global";
+import type { ChatMessage, LLM, MessageContent, MessageType } from "../llms";
+import { BaseMemory, ChatMemoryBuffer } from "../memory";
+import type { BaseNodePostprocessor } from "../postprocessor";
 import {
   type ContextSystemPrompt,
   type ModuleRecord,
   PromptMixin,
   type PromptsRecord,
-} from "@llamaindex/core/prompts";
-import type { BaseRetriever } from "@llamaindex/core/retriever";
-import { EngineResponse, MetadataMode } from "@llamaindex/core/schema";
-import {
-  extractText,
-  streamConverter,
-  streamReducer,
-} from "@llamaindex/core/utils";
-import { Settings } from "../../Settings.js";
-import { DefaultContextGenerator } from "./DefaultContextGenerator.js";
-import type { ContextGenerator } from "./types.js";
+} from "../prompts";
+import type { BaseRetriever } from "../retriever";
+import { EngineResponse, MetadataMode } from "../schema";
+import { extractText, streamConverter, streamReducer } from "../utils";
+import type {
+  BaseChatEngine,
+  NonStreamingChatEngineParams,
+  StreamingChatEngineParams,
+} from "./base";
+import { DefaultContextGenerator } from "./default-context-generator";
+import type { ContextGenerator } from "./type";

 /**
  * ContextChatEngine uses the Index to get the appropriate context for each query.
...
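Since ContextChatEngine extends PromptMixin (see the imports above), its prompts can be inspected and replaced without subclassing. A sketch, assuming an engine instance like the one in the earlier example; the exact prompt-record key is an assumption, so check the `getPrompts()` output before relying on it:

import { PromptTemplate } from "@llamaindex/core/prompts";

// PromptMixin API: list the prompts the engine and its context generator expose.
console.log(engine.getPrompts());

// Hypothetical key name; the `context` template variable matches the
// ContextSystemPrompt type imported above.
engine.updatePrompts({
  contextSystemPrompt: new PromptTemplate({
    templateVars: ["context"],
    template: "Answer strictly from the context below.\n{context}",
  }),
});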
-import type { MessageContent, MessageType } from "@llamaindex/core/llms";
-import type { BaseNodePostprocessor } from "@llamaindex/core/postprocessor";
+import type { MessageContent, MessageType } from "../llms";
+import type { BaseNodePostprocessor } from "../postprocessor";
 import {
   type ContextSystemPrompt,
   defaultContextSystemPrompt,
   type ModuleRecord,
   PromptMixin,
-} from "@llamaindex/core/prompts";
-import { createMessageContent } from "@llamaindex/core/response-synthesizers";
-import type { BaseRetriever } from "@llamaindex/core/retriever";
-import { MetadataMode, type NodeWithScore } from "@llamaindex/core/schema";
-import type { Context, ContextGenerator } from "./types.js";
+} from "../prompts";
+import { createMessageContent } from "../response-synthesizers";
+import type { BaseRetriever } from "../retriever";
+import { MetadataMode, type NodeWithScore } from "../schema";
+import type { Context, ContextGenerator } from "./type.js";

 export class DefaultContextGenerator
   extends PromptMixin
...
export {
  BaseChatEngine,
  type BaseChatEngineParams,
  type NonStreamingChatEngineParams,
  type StreamingChatEngineParams,
} from "./base";
export { ContextChatEngine } from "./context-chat-engine";
export { DefaultContextGenerator } from "./default-context-generator";
export { SimpleChatEngine } from "./simple-chat-engine";
+import type { LLM } from "../llms";
+import { BaseMemory, ChatMemoryBuffer } from "../memory";
+import { EngineResponse } from "../schema";
+import { streamConverter, streamReducer } from "../utils";
 import type {
   BaseChatEngine,
   NonStreamingChatEngineParams,
   StreamingChatEngineParams,
-} from "@llamaindex/core/chat-engine";
-import type { LLM } from "@llamaindex/core/llms";
-import { BaseMemory, ChatMemoryBuffer } from "@llamaindex/core/memory";
-import { EngineResponse } from "@llamaindex/core/schema";
-import { streamConverter, streamReducer } from "@llamaindex/core/utils";
+} from "./base";
-import { wrapEventCaller } from "@llamaindex/core/decorator";
-import { Settings } from "../../Settings.js";
+import { wrapEventCaller } from "../decorator";
+import { Settings } from "../global";

 /**
  * SimpleChatEngine is the simplest possible chat engine. Useful for using your own custom prompts.
...
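As the doc comment says, SimpleChatEngine is just memory plus the configured LLM, with no retrieval step, which makes it the natural place to drive your own prompts. A minimal usage sketch, assuming Settings.llm has already been configured:

import { SimpleChatEngine } from "@llamaindex/core/chat-engine";

const engine = new SimpleChatEngine();

// Turns accumulate in the engine's ChatMemoryBuffer between calls.
await engine.chat({ message: "My name is Ada." });
const reply = await engine.chat({ message: "What is my name?" });
console.log(reply.message.content); // the earlier turn comes from memory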
-import type { ChatMessage } from "@llamaindex/core/llms";
-import type { NodeWithScore } from "@llamaindex/core/schema";
+import type { ChatMessage } from "../llms";
+import type { NodeWithScore } from "../schema";

 export interface Context {
   message: ChatMessage;
   nodes: NodeWithScore[];
 }

 /**
  * A ContextGenerator is used to generate a context based on a message's text content
  */
...
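The block above defines the Context shape a ContextGenerator must produce; in the moved code, DefaultContextGenerator fills it by retrieving nodes and rendering them through the context system prompt. A sketch of a trivial custom generator; the `generate(messageText)` signature is an assumption inferred from the comment above:

import type { ChatMessage } from "@llamaindex/core/llms";
import type { NodeWithScore } from "@llamaindex/core/schema";

interface Context {
  message: ChatMessage;
  nodes: NodeWithScore[];
}

// Hypothetical generator that ignores the message and returns fixed context.
class StaticContextGenerator {
  async generate(_messageText: string): Promise<Context> {
    return {
      message: { role: "system", content: "Context: the sky is blue." },
      nodes: [], // no retrieved nodes in this toy example
    };
  }
}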
+export * from "@llamaindex/core/chat-engine";
 export { CondenseQuestionChatEngine } from "./CondenseQuestionChatEngine.js";
-export { ContextChatEngine } from "./ContextChatEngine.js";
-export { SimpleChatEngine } from "./SimpleChatEngine.js";
 export * from "./types.js";