From c4bd0a521547194594bd987e845bb5fb69fbcb04 Mon Sep 17 00:00:00 2001 From: Alex Yang <himself65@outlook.com> Date: Mon, 8 Jul 2024 15:48:59 -0700 Subject: [PATCH] refactor: move llm & callback manager to core module (#1026) --- examples/package.json | 1 + examples/qdrantdb/preFilters.ts | 17 +- examples/recipes/cost-analysis.ts | 2 +- packages/community/package.json | 4 +- packages/community/src/llm/bedrock/base.ts | 25 +- .../community/src/llm/bedrock/provider.ts | 4 +- .../src/llm/bedrock/providers/anthropic.ts | 2 +- .../src/llm/bedrock/providers/meta.ts | 2 +- packages/community/src/llm/bedrock/utils.ts | 4 +- packages/core/package.json | 14 ++ packages/core/src/global/index.ts | 11 + packages/core/src/global/settings.ts | 21 ++ .../src/global/settings/callback-manager.ts | 131 ++++++++++ .../src/llm => core/src/llms}/base.ts | 5 +- packages/core/src/llms/index.ts | 1 + .../src/utils/event-caller.ts} | 11 +- packages/core/src/utils/index.ts | 54 ++++ packages/core/src/utils/llms.ts | 79 ++++++ packages/core/src/utils/wrap-llm-event.ts | 88 +++++++ packages/core/tsconfig.json | 1 + .../llamaindex/e2e/fixtures/llm/openai.ts | 2 +- packages/llamaindex/e2e/node/claude.e2e.ts | 2 +- packages/llamaindex/e2e/node/openai.e2e.ts | 2 +- packages/llamaindex/e2e/node/react.e2e.ts | 2 +- packages/llamaindex/e2e/node/utils.ts | 2 +- packages/llamaindex/e2e/package.json | 1 + packages/llamaindex/src/ChatHistory.ts | 2 +- packages/llamaindex/src/EngineResponse.ts | 2 +- packages/llamaindex/src/Settings.ts | 17 +- packages/llamaindex/src/agent/base.ts | 7 +- packages/llamaindex/src/agent/react.ts | 4 +- packages/llamaindex/src/agent/types.ts | 2 +- packages/llamaindex/src/agent/utils.ts | 11 +- .../src/callbacks/CallbackManager.ts | 230 ------------------ .../src/cloud/LlamaCloudRetriever.ts | 7 +- .../src/embeddings/DeepInfraEmbedding.ts | 2 +- .../src/embeddings/MultiModalEmbedding.ts | 2 +- packages/llamaindex/src/embeddings/types.ts | 2 +- .../chat/CondenseQuestionChatEngine.ts | 7 +- .../src/engines/chat/ContextChatEngine.ts | 12 +- .../src/engines/chat/SimpleChatEngine.ts | 7 +- .../src/engines/query/RetrieverQueryEngine.ts | 4 +- .../engines/query/SubQuestionQueryEngine.ts | 2 +- .../llamaindex/src/evaluation/Correctness.ts | 2 +- packages/llamaindex/src/index.edge.ts | 26 +- .../llamaindex/src/indices/keyword/index.ts | 2 +- .../llamaindex/src/indices/summary/index.ts | 18 +- .../src/indices/vectorStore/index.ts | 13 +- .../src/internal/settings/CallbackManager.ts | 25 -- packages/llamaindex/src/internal/type.ts | 5 - packages/llamaindex/src/internal/utils.ts | 2 +- packages/llamaindex/src/llm/anthropic.ts | 4 +- packages/llamaindex/src/llm/gemini/base.ts | 4 +- packages/llamaindex/src/llm/gemini/utils.ts | 2 +- packages/llamaindex/src/llm/gemini/vertex.ts | 2 +- packages/llamaindex/src/llm/huggingface.ts | 21 +- packages/llamaindex/src/llm/index.ts | 2 - packages/llamaindex/src/llm/mistral.ts | 26 +- packages/llamaindex/src/llm/ollama.ts | 2 +- packages/llamaindex/src/llm/openai.ts | 42 ++-- packages/llamaindex/src/llm/portkey.ts | 33 +-- packages/llamaindex/src/llm/replicate_ai.ts | 22 +- packages/llamaindex/src/llm/types.ts | 31 +-- packages/llamaindex/src/llm/utils.ts | 217 ----------------- packages/llamaindex/src/objects/base.ts | 2 +- .../postprocessors/rerankers/CohereRerank.ts | 2 +- .../rerankers/JinaAIReranker.ts | 2 +- .../MultiModalResponseSynthesizer.ts | 2 +- .../src/synthesizers/ResponseSynthesizer.ts | 2 +- .../llamaindex/src/synthesizers/builders.ts | 2 +- 
packages/llamaindex/src/tools/functionTool.ts | 2 +- packages/llamaindex/src/types.ts | 8 - .../llamaindex/tests/CallbackManager.test.ts | 168 ------------- .../llamaindex/tests/utility/mockOpenAI.ts | 63 +---- pnpm-lock.yaml | 62 ++--- 75 files changed, 641 insertions(+), 983 deletions(-) create mode 100644 packages/core/src/global/settings/callback-manager.ts rename packages/{llamaindex/src/llm => core/src/llms}/base.ts (94%) rename packages/{llamaindex/src/internal/context/EventCaller.ts => core/src/utils/event-caller.ts} (89%) create mode 100644 packages/core/src/utils/index.ts create mode 100644 packages/core/src/utils/llms.ts create mode 100644 packages/core/src/utils/wrap-llm-event.ts delete mode 100644 packages/llamaindex/src/callbacks/CallbackManager.ts delete mode 100644 packages/llamaindex/src/internal/settings/CallbackManager.ts delete mode 100644 packages/llamaindex/src/internal/type.ts delete mode 100644 packages/llamaindex/src/llm/utils.ts delete mode 100644 packages/llamaindex/tests/CallbackManager.test.ts diff --git a/examples/package.json b/examples/package.json index 8e735bfb5..6b11e468b 100644 --- a/examples/package.json +++ b/examples/package.json @@ -6,6 +6,7 @@ "@aws-crypto/sha256-js": "^5.2.0", "@azure/identity": "^4.2.1", "@datastax/astra-db-ts": "^1.2.1", + "@llamaindex/core": "^0.0.3", "@notionhq/client": "^2.2.15", "@pinecone-database/pinecone": "^2.2.2", "@zilliz/milvus2-sdk-node": "^2.4.2", diff --git a/examples/qdrantdb/preFilters.ts b/examples/qdrantdb/preFilters.ts index 35a04e04a..133ac7122 100644 --- a/examples/qdrantdb/preFilters.ts +++ b/examples/qdrantdb/preFilters.ts @@ -1,8 +1,8 @@ import * as dotenv from "dotenv"; import { - CallbackManager, Document, MetadataMode, + NodeWithScore, QdrantVectorStore, Settings, VectorStoreIndex, @@ -10,13 +10,14 @@ import { } from "llamaindex"; // Update callback manager -Settings.callbackManager = new CallbackManager({ - onRetrieve: (data) => { - console.log( - "The retrieved nodes are:", - data.nodes.map((node) => node.node.getContent(MetadataMode.NONE)), - ); - }, +Settings.callbackManager.on("retrieve-end", (event) => { + const data = event.detail.payload; + console.log( + "The retrieved nodes are:", + data.nodes.map((node: NodeWithScore) => + node.node.getContent(MetadataMode.NONE), + ), + ); }); // Load environment variables from local .env file diff --git a/examples/recipes/cost-analysis.ts b/examples/recipes/cost-analysis.ts index 37b4aef36..725d35d1c 100644 --- a/examples/recipes/cost-analysis.ts +++ b/examples/recipes/cost-analysis.ts @@ -1,7 +1,7 @@ +import { extractText } from "@llamaindex/core/utils"; import { encodingForModel } from "js-tiktoken"; import { ChatMessage, OpenAI, type LLMStartEvent } from "llamaindex"; import { Settings } from "llamaindex/Settings"; -import { extractText } from "llamaindex/llm/utils"; const encoding = encodingForModel("gpt-4-0125-preview"); diff --git a/packages/community/package.json b/packages/community/package.json index 2d9343951..b4742a8fc 100644 --- a/packages/community/package.json +++ b/packages/community/package.json @@ -42,11 +42,11 @@ "dev": "bunchee --watch" }, "devDependencies": { + "@types/node": "^20.14.2", "bunchee": "5.3.0-beta.0" }, "dependencies": { "@aws-sdk/client-bedrock-runtime": "^3.600.0", - "@types/node": "^20.14.2", - "llamaindex": "workspace:*" + "@llamaindex/core": "workspace:*" } } diff --git a/packages/community/src/llm/bedrock/base.ts b/packages/community/src/llm/bedrock/base.ts index b50f6f1c2..1c04a88ab 100644 --- 
a/packages/community/src/llm/bedrock/base.ts +++ b/packages/community/src/llm/bedrock/base.ts @@ -4,18 +4,19 @@ import { InvokeModelCommand, InvokeModelWithResponseStreamCommand, } from "@aws-sdk/client-bedrock-runtime"; -import type { - ChatMessage, - ChatResponse, - CompletionResponse, - LLMChatParamsNonStreaming, - LLMChatParamsStreaming, - LLMCompletionParamsNonStreaming, - LLMCompletionParamsStreaming, - LLMMetadata, - ToolCallLLMMessageOptions, -} from "llamaindex"; -import { streamConverter, ToolCallLLM, wrapLLMEvent } from "llamaindex"; +import { + type ChatMessage, + type ChatResponse, + type CompletionResponse, + type LLMChatParamsNonStreaming, + type LLMChatParamsStreaming, + type LLMCompletionParamsNonStreaming, + type LLMCompletionParamsStreaming, + type LLMMetadata, + ToolCallLLM, + type ToolCallLLMMessageOptions, +} from "@llamaindex/core/llms"; +import { streamConverter, wrapLLMEvent } from "@llamaindex/core/utils"; import { type BedrockAdditionalChatOptions, type BedrockChatStreamResponse, diff --git a/packages/community/src/llm/bedrock/provider.ts b/packages/community/src/llm/bedrock/provider.ts index 99b615354..43aaed8b9 100644 --- a/packages/community/src/llm/bedrock/provider.ts +++ b/packages/community/src/llm/bedrock/provider.ts @@ -8,9 +8,9 @@ import { type ChatMessage, type ChatResponseChunk, type LLMMetadata, - streamConverter, type ToolCallLLMMessageOptions, -} from "llamaindex"; +} from "@llamaindex/core/llms"; +import { streamConverter } from "@llamaindex/core/utils"; import type { ToolChoice } from "./types"; import { toUtf8 } from "./utils"; diff --git a/packages/community/src/llm/bedrock/providers/anthropic.ts b/packages/community/src/llm/bedrock/providers/anthropic.ts index d68c1b721..9dadf782f 100644 --- a/packages/community/src/llm/bedrock/providers/anthropic.ts +++ b/packages/community/src/llm/bedrock/providers/anthropic.ts @@ -10,7 +10,7 @@ import type { PartialToolCall, ToolCall, ToolCallLLMMessageOptions, -} from "llamaindex"; +} from "@llamaindex/core/llms"; import { type BedrockAdditionalChatOptions, type BedrockChatStreamResponse, diff --git a/packages/community/src/llm/bedrock/providers/meta.ts b/packages/community/src/llm/bedrock/providers/meta.ts index 26eacf086..2e19ec9a5 100644 --- a/packages/community/src/llm/bedrock/providers/meta.ts +++ b/packages/community/src/llm/bedrock/providers/meta.ts @@ -2,7 +2,7 @@ import type { InvokeModelCommandInput, InvokeModelWithResponseStreamCommandInput, } from "@aws-sdk/client-bedrock-runtime"; -import type { ChatMessage, LLMMetadata } from "llamaindex"; +import type { ChatMessage, LLMMetadata } from "@llamaindex/core/llms"; import type { MetaNoneStreamingResponse, MetaStreamEvent } from "../types"; import { mapChatMessagesToMetaLlama2Messages, diff --git a/packages/community/src/llm/bedrock/utils.ts b/packages/community/src/llm/bedrock/utils.ts index c301d16b2..b26fc385f 100644 --- a/packages/community/src/llm/bedrock/utils.ts +++ b/packages/community/src/llm/bedrock/utils.ts @@ -1,13 +1,13 @@ +import type { JSONObject } from "@llamaindex/core/global"; import type { BaseTool, ChatMessage, - JSONObject, MessageContent, MessageContentDetail, MessageContentTextDetail, ToolCallLLMMessageOptions, ToolMetadata, -} from "llamaindex"; +} from "@llamaindex/core/llms"; import type { AnthropicContent, AnthropicImageContent, diff --git a/packages/core/package.json b/packages/core/package.json index f98082e9c..7913b487a 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -59,6 +59,20 @@ "types": 
"./dist/schema/index.d.ts", "default": "./dist/schema/index.js" } + }, + "./utils": { + "require": { + "types": "./dist/utils/index.d.cts", + "default": "./dist/utils/index.cjs" + }, + "import": { + "types": "./dist/utils/index.d.ts", + "default": "./dist/utils/index.js" + }, + "default": { + "types": "./dist/utils/index.d.ts", + "default": "./dist/utils/index.js" + } } }, "files": [ diff --git a/packages/core/src/global/index.ts b/packages/core/src/global/index.ts index 56dde8448..2c54f3a7e 100644 --- a/packages/core/src/global/index.ts +++ b/packages/core/src/global/index.ts @@ -1 +1,12 @@ export { Settings } from "./settings"; +export { CallbackManager } from "./settings/callback-manager"; +export type { + BaseEvent, + LLMEndEvent, + LLMStartEvent, + LLMStreamEvent, + LLMToolCallEvent, + LLMToolResultEvent, + LlamaIndexEventMaps, +} from "./settings/callback-manager"; +export type { JSONArray, JSONObject, JSONValue } from "./type"; diff --git a/packages/core/src/global/settings.ts b/packages/core/src/global/settings.ts index e63632e5e..4d8b18e80 100644 --- a/packages/core/src/global/settings.ts +++ b/packages/core/src/global/settings.ts @@ -1,3 +1,9 @@ +import { + type CallbackManager, + getCallbackManager, + setCallbackManager, + withCallbackManager, +} from "./settings/callback-manager"; import { getChunkSize, setChunkSize, @@ -14,4 +20,19 @@ export const Settings = { withChunkSize<Result>(chunkSize: number, fn: () => Result): Result { return withChunkSize(chunkSize, fn); }, + + get callbackManager(): CallbackManager { + return getCallbackManager(); + }, + + set callbackManager(callbackManager: CallbackManager) { + setCallbackManager(callbackManager); + }, + + withCallbackManager<Result>( + callbackManager: CallbackManager, + fn: () => Result, + ): Result { + return withCallbackManager(callbackManager, fn); + }, }; diff --git a/packages/core/src/global/settings/callback-manager.ts b/packages/core/src/global/settings/callback-manager.ts new file mode 100644 index 000000000..d85f9bb79 --- /dev/null +++ b/packages/core/src/global/settings/callback-manager.ts @@ -0,0 +1,131 @@ +import { AsyncLocalStorage, CustomEvent } from "@llamaindex/env"; +import type { + ChatMessage, + ChatResponse, + ChatResponseChunk, + ToolCall, + ToolOutput, +} from "../../llms"; +import type { UUID } from "../type"; + +export type BaseEvent<Payload> = CustomEvent<{ + payload: Readonly<Payload>; +}>; + +export type LLMStartEvent = BaseEvent<{ + id: UUID; + messages: ChatMessage[]; +}>; +export type LLMToolCallEvent = BaseEvent<{ + toolCall: ToolCall; +}>; +export type LLMToolResultEvent = BaseEvent<{ + toolCall: ToolCall; + toolResult: ToolOutput; +}>; +export type LLMEndEvent = BaseEvent<{ + id: UUID; + response: ChatResponse; +}>; +export type LLMStreamEvent = BaseEvent<{ + id: UUID; + chunk: ChatResponseChunk; +}>; + +export interface LlamaIndexEventMaps { + "llm-start": LLMStartEvent; + "llm-end": LLMEndEvent; + "llm-tool-call": LLMToolCallEvent; + "llm-tool-result": LLMToolResultEvent; + "llm-stream": LLMStreamEvent; +} + +export class LlamaIndexCustomEvent<T = any> extends CustomEvent<T> { + private constructor(event: string, options?: CustomEventInit) { + super(event, options); + } + + static fromEvent<Type extends keyof LlamaIndexEventMaps>( + type: Type, + detail: LlamaIndexEventMaps[Type]["detail"], + ) { + return new LlamaIndexCustomEvent(type, { + detail: detail, + }); + } +} + +type EventHandler<Event> = (event: Event) => void; + +export class CallbackManager { + #handlers = new Map<keyof 
LlamaIndexEventMaps, EventHandler<CustomEvent>[]>(); + + on<K extends keyof LlamaIndexEventMaps>( + event: K, + handler: EventHandler<LlamaIndexEventMaps[K]>, + ) { + if (!this.#handlers.has(event)) { + this.#handlers.set(event, []); + } + this.#handlers.get(event)!.push(handler); + return this; + } + + off<K extends keyof LlamaIndexEventMaps>( + event: K, + handler: EventHandler<LlamaIndexEventMaps[K]>, + ) { + if (!this.#handlers.has(event)) { + return this; + } + const cbs = this.#handlers.get(event)!; + const index = cbs.indexOf(handler); + if (index > -1) { + cbs.splice(index, 1); + } + return this; + } + + dispatchEvent<K extends keyof LlamaIndexEventMaps>( + event: K, + detail: LlamaIndexEventMaps[K]["detail"], + ) { + const cbs = this.#handlers.get(event); + if (!cbs) { + return; + } + queueMicrotask(() => { + cbs.forEach((handler) => + handler( + LlamaIndexCustomEvent.fromEvent(event, structuredClone(detail)), + ), + ); + }); + } +} + +export const globalCallbackManager = new CallbackManager(); + +const callbackManagerAsyncLocalStorage = + new AsyncLocalStorage<CallbackManager>(); + +let currentCallbackManager: CallbackManager | null = null; + +export function getCallbackManager(): CallbackManager { + return ( + callbackManagerAsyncLocalStorage.getStore() ?? + currentCallbackManager ?? + globalCallbackManager + ); +} + +export function setCallbackManager(callbackManager: CallbackManager) { + currentCallbackManager = callbackManager; +} + +export function withCallbackManager<Result>( + callbackManager: CallbackManager, + fn: () => Result, +): Result { + return callbackManagerAsyncLocalStorage.run(callbackManager, fn); +} diff --git a/packages/llamaindex/src/llm/base.ts b/packages/core/src/llms/base.ts similarity index 94% rename from packages/llamaindex/src/llm/base.ts rename to packages/core/src/llms/base.ts index e5633667a..b04defc65 100644 --- a/packages/llamaindex/src/llm/base.ts +++ b/packages/core/src/llms/base.ts @@ -1,3 +1,5 @@ +import { streamConverter } from "../utils"; +import { extractText } from "../utils/llms"; import type { ChatResponse, ChatResponseChunk, @@ -9,8 +11,7 @@ import type { LLMCompletionParamsStreaming, LLMMetadata, ToolCallLLMMessageOptions, -} from "@llamaindex/core/llms"; -import { extractText, streamConverter } from "./utils.js"; +} from "./type"; export abstract class BaseLLM< AdditionalChatOptions extends object = object, diff --git a/packages/core/src/llms/index.ts b/packages/core/src/llms/index.ts index c387fe087..febf206d3 100644 --- a/packages/core/src/llms/index.ts +++ b/packages/core/src/llms/index.ts @@ -1,3 +1,4 @@ +export { BaseLLM, ToolCallLLM } from "./base"; export type { BaseTool, BaseToolWithCall, diff --git a/packages/llamaindex/src/internal/context/EventCaller.ts b/packages/core/src/utils/event-caller.ts similarity index 89% rename from packages/llamaindex/src/internal/context/EventCaller.ts rename to packages/core/src/utils/event-caller.ts index ab7a2d9a1..7b57b34a8 100644 --- a/packages/llamaindex/src/internal/context/EventCaller.ts +++ b/packages/core/src/utils/event-caller.ts @@ -1,5 +1,14 @@ import { AsyncLocalStorage, randomUUID } from "@llamaindex/env"; -import { isAsyncIterable, isIterable } from "../utils.js"; + +export const isAsyncIterable = ( + obj: unknown, +): obj is AsyncIterable<unknown> => { + return obj != null && typeof obj === "object" && Symbol.asyncIterator in obj; +}; + +export const isIterable = (obj: unknown): obj is Iterable<unknown> => { + return obj != null && typeof obj === "object" && Symbol.iterator in 
obj; +}; const eventReasonAsyncLocalStorage = new AsyncLocalStorage<EventCaller>(); diff --git a/packages/core/src/utils/index.ts b/packages/core/src/utils/index.ts new file mode 100644 index 000000000..afa21bcd5 --- /dev/null +++ b/packages/core/src/utils/index.ts @@ -0,0 +1,54 @@ +export { wrapEventCaller } from "./event-caller"; + +export async function* streamConverter<S, D>( + stream: AsyncIterable<S>, + converter: (s: S) => D | null, +): AsyncIterable<D> { + for await (const data of stream) { + const newData = converter(data); + if (newData === null) { + return; + } + yield newData; + } +} + +export async function* streamCallbacks<S>( + stream: AsyncIterable<S>, + callbacks: { + finished?: (value?: S) => void; + }, +): AsyncIterable<S> { + let value: S | undefined; + for await (value of stream) { + yield value; + } + if (callbacks.finished) { + callbacks.finished(value); + } +} + +export async function* streamReducer<S, D>(params: { + stream: AsyncIterable<S>; + reducer: (previousValue: D, currentValue: S) => D; + initialValue: D; + finished?: (value: D) => void; +}): AsyncIterable<S> { + let value = params.initialValue; + for await (const data of params.stream) { + value = params.reducer(value, data); + yield data; + } + if (params.finished) { + params.finished(value); + } +} + +export { wrapLLMEvent } from "./wrap-llm-event"; + +export { + extractDataUrlComponents, + extractImage, + extractSingleText, + extractText, +} from "./llms"; diff --git a/packages/core/src/utils/llms.ts b/packages/core/src/utils/llms.ts new file mode 100644 index 000000000..8a54cb7f5 --- /dev/null +++ b/packages/core/src/utils/llms.ts @@ -0,0 +1,79 @@ +import type { + MessageContent, + MessageContentDetail, + MessageContentTextDetail, +} from "../llms"; +import type { ImageType } from "../schema"; + +/** + * Extracts just the text from a multi-modal message or the message itself if it's just text. + * + * @param message The message to extract text from. + * @returns The extracted text + */ +export function extractText(message: MessageContent): string { + if (typeof message !== "string" && !Array.isArray(message)) { + console.warn( + "extractText called with non-MessageContent message, this is likely a bug.", + ); + return `${message}`; + } else if (typeof message !== "string" && Array.isArray(message)) { + // message is of type MessageContentDetail[] - retrieve just the text parts and concatenate them + // so we can pass them to the context generator + return message + .filter((c): c is MessageContentTextDetail => c.type === "text") + .map((c) => c.text) + .join("\n\n"); + } else { + return message; + } +} + +/** + * Extracts a single text from a multi-modal message content + * + * @param message The message to extract images from. + * @returns The extracted images + */ +export function extractSingleText( + message: MessageContentDetail, +): string | null { + if (message.type === "text") { + return message.text; + } + return null; +} + +/** + * Extracts an image from a multi-modal message content + * + * @param message The message to extract images from. 
+ * @returns The extracted images + */ +export function extractImage(message: MessageContentDetail): ImageType | null { + if (message.type === "image_url") { + return new URL(message.image_url.url); + } + return null; +} + +export const extractDataUrlComponents = ( + dataUrl: string, +): { + mimeType: string; + base64: string; +} => { + const parts = dataUrl.split(";base64,"); + + if (parts.length !== 2 || !parts[0].startsWith("data:")) { + throw new Error("Invalid data URL"); + } + + const mimeType = parts[0].slice(5); + const base64 = parts[1]; + + return { + mimeType, + base64, + }; +}; diff --git a/packages/core/src/utils/wrap-llm-event.ts b/packages/core/src/utils/wrap-llm-event.ts new file mode 100644 index 000000000..41f85e601 --- /dev/null +++ b/packages/core/src/utils/wrap-llm-event.ts @@ -0,0 +1,88 @@ +import { AsyncLocalStorage, randomUUID } from "@llamaindex/env"; +import { getCallbackManager } from "../global/settings/callback-manager"; +import type { ChatResponse, ChatResponseChunk, LLM, LLMChat } from "../llms"; + +export function wrapLLMEvent< + AdditionalChatOptions extends object = object, + AdditionalMessageOptions extends object = object, +>( + originalMethod: LLMChat< + AdditionalChatOptions, + AdditionalMessageOptions + >["chat"], + _context: ClassMethodDecoratorContext, +) { + return async function withLLMEvent( + this: LLM<AdditionalChatOptions, AdditionalMessageOptions>, + ...params: Parameters< + LLMChat<AdditionalChatOptions, AdditionalMessageOptions>["chat"] + > + ): ReturnType< + LLMChat<AdditionalChatOptions, AdditionalMessageOptions>["chat"] + > { + const id = randomUUID(); + getCallbackManager().dispatchEvent("llm-start", { + payload: { + id, + messages: params[0].messages, + }, + }); + const response = await originalMethod.call(this, ...params); + if (Symbol.asyncIterator in response) { + // save snapshot to restore it after the response is done + const snapshot = AsyncLocalStorage.snapshot(); + const originalAsyncIterator = { + [Symbol.asyncIterator]: response[Symbol.asyncIterator].bind(response), + }; + response[Symbol.asyncIterator] = async function* () { + const finalResponse = { + raw: [] as ChatResponseChunk[], + message: { + content: "", + role: "assistant", + options: {}, + }, + } satisfies ChatResponse; + let firstOne = false; + for await (const chunk of originalAsyncIterator) { + if (!firstOne) { + firstOne = true; + finalResponse.message.content = chunk.delta; + } else { + finalResponse.message.content += chunk.delta; + } + if (chunk.options) { + finalResponse.message.options = { + ...finalResponse.message.options, + ...chunk.options, + }; + } + getCallbackManager().dispatchEvent("llm-stream", { + payload: { + id, + chunk, + }, + }); + finalResponse.raw.push(chunk); + yield chunk; + } + snapshot(() => { + getCallbackManager().dispatchEvent("llm-end", { + payload: { + id, + response: finalResponse, + }, + }); + }); + }; + } else { + getCallbackManager().dispatchEvent("llm-end", { + payload: { + id, + response, + }, + }); + } + return response; + }; +} diff --git a/packages/core/tsconfig.json b/packages/core/tsconfig.json index a93775d95..e225d02d8 100644 --- a/packages/core/tsconfig.json +++ b/packages/core/tsconfig.json @@ -8,6 +8,7 @@ "moduleResolution": "Bundler", "skipLibCheck": true, "strict": true, + "lib": ["ESNext", "DOM"], "types": ["node"] }, "include": ["./src"], diff --git a/packages/llamaindex/e2e/fixtures/llm/openai.ts b/packages/llamaindex/e2e/fixtures/llm/openai.ts index 30d8b2cdb..46ceb0ca9 100644 --- 
a/packages/llamaindex/e2e/fixtures/llm/openai.ts +++ b/packages/llamaindex/e2e/fixtures/llm/openai.ts @@ -1,3 +1,4 @@ +import { extractText } from "@llamaindex/core/utils"; import type { ChatResponse, ChatResponseChunk, @@ -8,7 +9,6 @@ import type { LLMCompletionParamsNonStreaming, LLMCompletionParamsStreaming, } from "llamaindex"; -import { extractText } from "llamaindex/llm/utils"; import { deepStrictEqual, strictEqual } from "node:assert"; import { llmCompleteMockStorage } from "../../node/utils.js"; diff --git a/packages/llamaindex/e2e/node/claude.e2e.ts b/packages/llamaindex/e2e/node/claude.e2e.ts index c8fde2d90..4222c8f3e 100644 --- a/packages/llamaindex/e2e/node/claude.e2e.ts +++ b/packages/llamaindex/e2e/node/claude.e2e.ts @@ -1,7 +1,7 @@ +import { extractText } from "@llamaindex/core/utils"; import { consola } from "consola"; import { Anthropic, FunctionTool, Settings, type LLM } from "llamaindex"; import { AnthropicAgent } from "llamaindex/agent/anthropic"; -import { extractText } from "llamaindex/llm/utils"; import { ok } from "node:assert"; import { beforeEach, test } from "node:test"; import { getWeatherTool, sumNumbersTool } from "./fixtures/tools.js"; diff --git a/packages/llamaindex/e2e/node/openai.e2e.ts b/packages/llamaindex/e2e/node/openai.e2e.ts index 654584674..7193e4644 100644 --- a/packages/llamaindex/e2e/node/openai.e2e.ts +++ b/packages/llamaindex/e2e/node/openai.e2e.ts @@ -1,3 +1,4 @@ +import { extractText } from "@llamaindex/core/utils"; import { consola } from "consola"; import { Document, @@ -14,7 +15,6 @@ import { VectorStoreIndex, type LLM, } from "llamaindex"; -import { extractText } from "llamaindex/llm/utils"; import { ok, strictEqual } from "node:assert"; import { readFile } from "node:fs/promises"; import { join } from "node:path"; diff --git a/packages/llamaindex/e2e/node/react.e2e.ts b/packages/llamaindex/e2e/node/react.e2e.ts index f500e2041..c0bb8c46c 100644 --- a/packages/llamaindex/e2e/node/react.e2e.ts +++ b/packages/llamaindex/e2e/node/react.e2e.ts @@ -1,5 +1,5 @@ +import { extractText } from "@llamaindex/core/utils"; import { OpenAI, ReActAgent, Settings, type LLM } from "llamaindex"; -import { extractText } from "llamaindex/llm/utils"; import { ok } from "node:assert"; import { beforeEach, test } from "node:test"; import { getWeatherTool } from "./fixtures/tools.js"; diff --git a/packages/llamaindex/e2e/node/utils.ts b/packages/llamaindex/e2e/node/utils.ts index 92d58b84a..717fed9b1 100644 --- a/packages/llamaindex/e2e/node/utils.ts +++ b/packages/llamaindex/e2e/node/utils.ts @@ -4,7 +4,7 @@ import { type LLMEndEvent, type LLMStartEvent, type LLMStreamEvent, -} from "llamaindex"; +} from "@llamaindex/core/global"; import { readFile, writeFile } from "node:fs/promises"; import { join } from "node:path"; import { type test } from "node:test"; diff --git a/packages/llamaindex/e2e/package.json b/packages/llamaindex/e2e/package.json index f19db5e75..ff5d1baa1 100644 --- a/packages/llamaindex/e2e/package.json +++ b/packages/llamaindex/e2e/package.json @@ -10,6 +10,7 @@ }, "devDependencies": { "@faker-js/faker": "^8.4.1", + "@llamaindex/core": "workspace:*", "@types/node": "^20.12.11", "consola": "^3.2.3", "llamaindex": "workspace:*", diff --git a/packages/llamaindex/src/ChatHistory.ts b/packages/llamaindex/src/ChatHistory.ts index c6423b9e8..7fa9868f9 100644 --- a/packages/llamaindex/src/ChatHistory.ts +++ b/packages/llamaindex/src/ChatHistory.ts @@ -1,9 +1,9 @@ import type { ChatMessage, LLM, MessageType } from "@llamaindex/core/llms"; +import { 
extractText } from "@llamaindex/core/utils"; import { tokenizers, type Tokenizer } from "@llamaindex/env"; import type { SummaryPrompt } from "./Prompt.js"; import { defaultSummaryPrompt, messagesToHistoryStr } from "./Prompt.js"; import { OpenAI } from "./llm/openai.js"; -import { extractText } from "./llm/utils.js"; /** * A ChatHistory is used to keep the state of back and forth chat messages diff --git a/packages/llamaindex/src/EngineResponse.ts b/packages/llamaindex/src/EngineResponse.ts index 543f33f6b..132a63940 100644 --- a/packages/llamaindex/src/EngineResponse.ts +++ b/packages/llamaindex/src/EngineResponse.ts @@ -4,7 +4,7 @@ import type { ChatResponseChunk, } from "@llamaindex/core/llms"; import type { NodeWithScore } from "@llamaindex/core/schema"; -import { extractText } from "./llm/utils.js"; +import { extractText } from "@llamaindex/core/utils"; export class EngineResponse implements ChatResponse, ChatResponseChunk { sourceNodes?: NodeWithScore[]; diff --git a/packages/llamaindex/src/Settings.ts b/packages/llamaindex/src/Settings.ts index 12513125d..ab43fff54 100644 --- a/packages/llamaindex/src/Settings.ts +++ b/packages/llamaindex/src/Settings.ts @@ -1,5 +1,7 @@ -import { Settings as CoreSettings } from "@llamaindex/core/global"; -import { CallbackManager } from "./callbacks/CallbackManager.js"; +import { + type CallbackManager, + Settings as CoreSettings, +} from "@llamaindex/core/global"; import { OpenAI } from "./llm/openai.js"; import { PromptHelper } from "./PromptHelper.js"; @@ -9,11 +11,6 @@ import type { LLM } from "@llamaindex/core/llms"; import { AsyncLocalStorage, getEnv } from "@llamaindex/env"; import type { ServiceContext } from "./ServiceContext.js"; import type { BaseEmbedding } from "./embeddings/types.js"; -import { - getCallbackManager, - setCallbackManager, - withCallbackManager, -} from "./internal/settings/CallbackManager.js"; import { getEmbeddedModel, setEmbeddedModel, @@ -129,18 +126,18 @@ class GlobalSettings implements Config { } get callbackManager(): CallbackManager { - return getCallbackManager(); + return CoreSettings.callbackManager; } set callbackManager(callbackManager: CallbackManager) { - setCallbackManager(callbackManager); + CoreSettings.callbackManager = callbackManager; } withCallbackManager<Result>( callbackManager: CallbackManager, fn: () => Result, ): Result { - return withCallbackManager(callbackManager, fn); + return CoreSettings.withCallbackManager(callbackManager, fn); } set chunkSize(chunkSize: number | undefined) { diff --git a/packages/llamaindex/src/agent/base.ts b/packages/llamaindex/src/agent/base.ts index 7d1eea8a9..464f37b73 100644 --- a/packages/llamaindex/src/agent/base.ts +++ b/packages/llamaindex/src/agent/base.ts @@ -5,6 +5,7 @@ import type { MessageContent, ToolOutput, } from "@llamaindex/core/llms"; +import { wrapEventCaller } from "@llamaindex/core/utils"; import { ReadableStream, TransformStream, randomUUID } from "@llamaindex/env"; import { ChatHistory } from "../ChatHistory.js"; import { EngineResponse } from "../EngineResponse.js"; @@ -14,9 +15,7 @@ import { type ChatEngineParamsNonStreaming, type ChatEngineParamsStreaming, } from "../engines/chat/index.js"; -import { wrapEventCaller } from "../internal/context/EventCaller.js"; import { consoleLogger, emptyLogger } from "../internal/logger.js"; -import { getCallbackManager } from "../internal/settings/CallbackManager.js"; import { isAsyncIterable } from "../internal/utils.js"; import { ObjectRetriever } from "../objects/index.js"; import type { @@ -69,7 
+68,7 @@ export function createTaskOutputStream< taskOutputs.push(output); controller.enqueue(output); }; - getCallbackManager().dispatchEvent("agent-start", { + Settings.callbackManager.dispatchEvent("agent-start", { payload: { startStep: step, }, @@ -93,7 +92,7 @@ export function createTaskOutputStream< "Final step(id, %s) reached, closing task.", step.id, ); - getCallbackManager().dispatchEvent("agent-end", { + Settings.callbackManager.dispatchEvent("agent-end", { payload: { endStep: step, }, diff --git a/packages/llamaindex/src/agent/react.ts b/packages/llamaindex/src/agent/react.ts index 51ade05ba..091348f53 100644 --- a/packages/llamaindex/src/agent/react.ts +++ b/packages/llamaindex/src/agent/react.ts @@ -1,3 +1,4 @@ +import type { JSONObject, JSONValue } from "@llamaindex/core/global"; import type { BaseTool, ChatMessage, @@ -5,15 +6,14 @@ import type { ChatResponseChunk, LLM, } from "@llamaindex/core/llms"; +import { extractText } from "@llamaindex/core/utils"; import { randomUUID, ReadableStream } from "@llamaindex/env"; import { getReACTAgentSystemHeader } from "../internal/prompt/react.js"; import { isAsyncIterable, stringifyJSONToMessageContent, } from "../internal/utils.js"; -import { extractText } from "../llm/utils.js"; import { Settings } from "../Settings.js"; -import type { JSONObject, JSONValue } from "../types.js"; import { AgentRunner, AgentWorker, type AgentParamsBase } from "./base.js"; import type { TaskHandler } from "./types.js"; import { diff --git a/packages/llamaindex/src/agent/types.ts b/packages/llamaindex/src/agent/types.ts index 45ec942b6..bc14da7fd 100644 --- a/packages/llamaindex/src/agent/types.ts +++ b/packages/llamaindex/src/agent/types.ts @@ -1,3 +1,4 @@ +import type { BaseEvent } from "@llamaindex/core/global"; import type { BaseToolWithCall, ChatMessage, @@ -9,7 +10,6 @@ import type { } from "@llamaindex/core/llms"; import { ReadableStream } from "@llamaindex/env"; import type { Logger } from "../internal/logger.js"; -import type { BaseEvent } from "../internal/type.js"; import type { UUID } from "../types.js"; export type AgentTaskContext< diff --git a/packages/llamaindex/src/agent/utils.ts b/packages/llamaindex/src/agent/utils.ts index 8bf49abc8..48962a8bf 100644 --- a/packages/llamaindex/src/agent/utils.ts +++ b/packages/llamaindex/src/agent/utils.ts @@ -1,3 +1,8 @@ +import { + type JSONObject, + type JSONValue, + Settings, +} from "@llamaindex/core/global"; import type { BaseTool, ChatMessage, @@ -14,13 +19,11 @@ import { baseToolWithCallSchema } from "@llamaindex/core/schema"; import { ReadableStream } from "@llamaindex/env"; import { z } from "zod"; import type { Logger } from "../internal/logger.js"; -import { getCallbackManager } from "../internal/settings/CallbackManager.js"; import { isAsyncIterable, prettifyError, stringifyJSONToMessageContent, } from "../internal/utils.js"; -import type { JSONObject, JSONValue } from "../types.js"; import type { AgentParamsBase } from "./base.js"; import type { TaskHandler } from "./types.js"; @@ -221,7 +224,7 @@ export async function callTool( }; } try { - getCallbackManager().dispatchEvent("llm-tool-call", { + Settings.callbackManager.dispatchEvent("llm-tool-call", { payload: { toolCall: { ...toolCall, input }, }, @@ -237,7 +240,7 @@ export async function callTool( output, isError: false, }; - getCallbackManager().dispatchEvent("llm-tool-result", { + Settings.callbackManager.dispatchEvent("llm-tool-result", { payload: { toolCall: { ...toolCall, input }, toolResult: { ...toolOutput }, diff --git 
a/packages/llamaindex/src/callbacks/CallbackManager.ts b/packages/llamaindex/src/callbacks/CallbackManager.ts deleted file mode 100644 index e37617245..000000000 --- a/packages/llamaindex/src/callbacks/CallbackManager.ts +++ /dev/null @@ -1,230 +0,0 @@ -import type { Anthropic } from "@anthropic-ai/sdk"; -import type { MessageContent } from "@llamaindex/core/llms"; -import type { NodeWithScore } from "@llamaindex/core/schema"; -import { CustomEvent } from "@llamaindex/env"; -import type { AgentEndEvent, AgentStartEvent } from "../agent/types.js"; -import { - EventCaller, - getEventCaller, -} from "../internal/context/EventCaller.js"; -import type { - LLMEndEvent, - LLMStartEvent, - LLMStreamEvent, - LLMToolCallEvent, - LLMToolResultEvent, - RetrievalEndEvent, - RetrievalStartEvent, -} from "../llm/types.js"; - -export class LlamaIndexCustomEvent<T = any> extends CustomEvent<T> { - reason: EventCaller | null; - private constructor( - event: string, - options?: CustomEventInit & { - reason?: EventCaller | null; - }, - ) { - super(event, options); - this.reason = options?.reason ?? null; - } - - static fromEvent<Type extends keyof LlamaIndexEventMaps>( - type: Type, - detail: LlamaIndexEventMaps[Type]["detail"], - ) { - return new LlamaIndexCustomEvent(type, { - detail: detail, - reason: getEventCaller(), - }); - } -} - -/** - * This type is used to define the event maps. - */ -export interface LlamaIndexEventMaps { - /** - * @deprecated - */ - retrieve: CustomEvent<RetrievalCallbackResponse>; - "retrieve-start": RetrievalStartEvent; - "retrieve-end": RetrievalEndEvent; - /** - * @deprecated - */ - stream: CustomEvent<StreamCallbackResponse>; - // llm events - "llm-start": LLMStartEvent; - "llm-end": LLMEndEvent; - "llm-tool-call": LLMToolCallEvent; - "llm-tool-result": LLMToolResultEvent; - "llm-stream": LLMStreamEvent; - // agent events - "agent-start": AgentStartEvent; - "agent-end": AgentEndEvent; -} - -//#region @deprecated remove in the next major version -//Specify StreamToken per mainstream LLM -export interface DefaultStreamToken { - id: string; - object: string; - created: number; - model: string; - choices: { - index: number; - delta: { - content?: string | null; - role?: "user" | "assistant" | "system" | "function" | "tool"; - }; - finish_reason: string | null; - }[]; -} - -//OpenAI stream token schema is the default. -//Note: Anthropic and Replicate also use similar token schemas. -export type OpenAIStreamToken = DefaultStreamToken; -export type AnthropicStreamToken = Anthropic.Completion; -// -//Callback Responses -// -//TODO: Write Embedding Callbacks - -//StreamCallbackResponse should let practitioners implement callbacks out of the box... -//When custom streaming LLMs are involved, people are expected to write their own StreamCallbackResponses -export interface StreamCallbackResponse { - index: number; - isDone?: boolean; - token?: DefaultStreamToken; -} - -export interface RetrievalCallbackResponse { - query: MessageContent; - nodes: NodeWithScore[]; -} - -interface CallbackManagerMethods { - /** - * onLLMStream is called when a token is streamed from the LLM. Defining this - * callback auto sets the stream = True flag on the openAI createChatCompletion request. - * @deprecated will be removed in the next major version - */ - onLLMStream: (params: StreamCallbackResponse) => Promise<void> | void; - /** - * onRetrieve is called as soon as the retriever finishes fetching relevant nodes. 
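[Editor's note] For users of the legacy class being deleted here, the deprecated `onRetrieve`/`onLLMStream` constructor options (and the `"retrieve"`/`"stream"` events behind them) are gone; the closest replacements are the `"retrieve-start"`/`"retrieve-end"` and `"llm-stream"` listeners, as the updated `examples/qdrantdb/preFilters.ts` earlier in this patch shows. A hedged migration sketch:

```ts
import { Settings, type NodeWithScore } from "llamaindex";

// Previously: new CallbackManager({ onRetrieve: ({ query, nodes }) => { ... } })
Settings.callbackManager.on("retrieve-end", (event) => {
  const { query, nodes } = event.detail.payload;
  console.log(
    `retrieved ${nodes.length} node(s) for query`,
    query,
    nodes.map((n: NodeWithScore) => n.score),
  );
});
```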
- * This callback allows you to handle the retrieved nodes even if the synthesizer - * is still running. - * @deprecated will be removed in the next major version - */ - onRetrieve: (params: RetrievalCallbackResponse) => Promise<void> | void; -} -//#endregion - -const noop: (...args: any[]) => any = () => void 0; - -type EventHandler<Event extends CustomEvent> = ( - event: Event & { - reason: EventCaller | null; - }, -) => void; - -export class CallbackManager implements CallbackManagerMethods { - /** - * @deprecated will be removed in the next major version - */ - get onLLMStream(): CallbackManagerMethods["onLLMStream"] { - return async (response) => { - await Promise.all( - this.#handlers - .get("stream")! - .map((handler) => - handler(LlamaIndexCustomEvent.fromEvent("stream", response)), - ), - ); - }; - } - - /** - * @deprecated will be removed in the next major version - */ - get onRetrieve(): CallbackManagerMethods["onRetrieve"] { - return async (response) => { - await Promise.all( - this.#handlers - .get("retrieve")! - .map((handler) => - handler(LlamaIndexCustomEvent.fromEvent("retrieve", response)), - ), - ); - }; - } - - /** - * @deprecated will be removed in the next major version - */ - set onLLMStream(_: never) { - throw new Error("onLLMStream is deprecated. Use on('stream') instead"); - } - - /** - * @deprecated will be removed in the next major version - */ - set onRetrieve(_: never) { - throw new Error("onRetrieve is deprecated. Use `on('retrieve')` instead"); - } - - #handlers = new Map<keyof LlamaIndexEventMaps, EventHandler<CustomEvent>[]>(); - - constructor(handlers?: Partial<CallbackManagerMethods>) { - const onLLMStream = handlers?.onLLMStream ?? noop; - this.on("stream", (event) => onLLMStream(event.detail)); - const onRetrieve = handlers?.onRetrieve ?? 
noop; - this.on("retrieve", (event) => onRetrieve(event.detail)); - } - - on< - K extends keyof LlamaIndexEventMaps, - H extends EventHandler<LlamaIndexEventMaps[K]>, - >(event: K, handler: H) { - if (!this.#handlers.has(event)) { - this.#handlers.set(event, []); - } - this.#handlers.get(event)!.push(handler); - return this; - } - - off< - K extends keyof LlamaIndexEventMaps, - H extends EventHandler<LlamaIndexEventMaps[K]>, - >(event: K, handler: H) { - if (!this.#handlers.has(event)) { - return; - } - const handlers = this.#handlers.get(event)!; - const index = handlers.indexOf(handler); - if (index > -1) { - handlers.splice(index, 1); - } - return this; - } - - dispatchEvent<K extends keyof LlamaIndexEventMaps>( - event: K, - detail: LlamaIndexEventMaps[K]["detail"], - ) { - const handlers = this.#handlers.get(event); - if (!handlers) { - return; - } - queueMicrotask(() => { - handlers.forEach((handler) => - handler( - LlamaIndexCustomEvent.fromEvent(event, { - ...detail, - }), - ), - ); - }); - } -} diff --git a/packages/llamaindex/src/cloud/LlamaCloudRetriever.ts b/packages/llamaindex/src/cloud/LlamaCloudRetriever.ts index c437d6e3d..29b8a55a1 100644 --- a/packages/llamaindex/src/cloud/LlamaCloudRetriever.ts +++ b/packages/llamaindex/src/cloud/LlamaCloudRetriever.ts @@ -4,12 +4,11 @@ import { type RetrievalParams, type TextNodeWithScore, } from "@llamaindex/cloud/api"; +import { Settings } from "@llamaindex/core/global"; import type { NodeWithScore } from "@llamaindex/core/schema"; import { jsonToNode, ObjectType } from "@llamaindex/core/schema"; +import { extractText, wrapEventCaller } from "@llamaindex/core/utils"; import type { BaseRetriever, RetrieveParams } from "../Retriever.js"; -import { wrapEventCaller } from "../internal/context/EventCaller.js"; -import { getCallbackManager } from "../internal/settings/CallbackManager.js"; -import { extractText } from "../llm/utils.js"; import type { ClientParams, CloudConstructorParams } from "./constants.js"; import { DEFAULT_PROJECT_NAME } from "./constants.js"; import { initService } from "./utils.js"; @@ -92,7 +91,7 @@ export class LlamaCloudRetriever implements BaseRetriever { const nodesWithScores = this.resultNodesToNodeWithScore( results.retrieval_nodes, ); - getCallbackManager().dispatchEvent("retrieve-end", { + Settings.callbackManager.dispatchEvent("retrieve-end", { payload: { query, nodes: nodesWithScores, diff --git a/packages/llamaindex/src/embeddings/DeepInfraEmbedding.ts b/packages/llamaindex/src/embeddings/DeepInfraEmbedding.ts index c3c03c6fe..27b033f65 100644 --- a/packages/llamaindex/src/embeddings/DeepInfraEmbedding.ts +++ b/packages/llamaindex/src/embeddings/DeepInfraEmbedding.ts @@ -1,6 +1,6 @@ import type { MessageContentDetail } from "@llamaindex/core/llms"; +import { extractSingleText } from "@llamaindex/core/utils"; import { getEnv } from "@llamaindex/env"; -import { extractSingleText } from "../llm/utils.js"; import { BaseEmbedding } from "./types.js"; const DEFAULT_MODEL = "sentence-transformers/clip-ViT-B-32"; diff --git a/packages/llamaindex/src/embeddings/MultiModalEmbedding.ts b/packages/llamaindex/src/embeddings/MultiModalEmbedding.ts index aabc43372..bd01ccc4d 100644 --- a/packages/llamaindex/src/embeddings/MultiModalEmbedding.ts +++ b/packages/llamaindex/src/embeddings/MultiModalEmbedding.ts @@ -7,7 +7,7 @@ import { type BaseNode, type ImageType, } from "@llamaindex/core/schema"; -import { extractImage, extractSingleText } from "../llm/utils.js"; +import { extractImage, extractSingleText } from 
"@llamaindex/core/utils"; import { BaseEmbedding, batchEmbeddings } from "./types.js"; /* diff --git a/packages/llamaindex/src/embeddings/types.ts b/packages/llamaindex/src/embeddings/types.ts index de32669e9..b14fdf2a5 100644 --- a/packages/llamaindex/src/embeddings/types.ts +++ b/packages/llamaindex/src/embeddings/types.ts @@ -1,9 +1,9 @@ import type { MessageContentDetail } from "@llamaindex/core/llms"; import type { BaseNode } from "@llamaindex/core/schema"; import { MetadataMode } from "@llamaindex/core/schema"; +import { extractSingleText } from "@llamaindex/core/utils"; import { type Tokenizers } from "@llamaindex/env"; import type { TransformComponent } from "../ingestion/types.js"; -import { extractSingleText } from "../llm/utils.js"; import { truncateMaxTokens } from "./tokenizer.js"; import { SimilarityType, similarity } from "./utils.js"; diff --git a/packages/llamaindex/src/engines/chat/CondenseQuestionChatEngine.ts b/packages/llamaindex/src/engines/chat/CondenseQuestionChatEngine.ts index 357088e97..099d90377 100644 --- a/packages/llamaindex/src/engines/chat/CondenseQuestionChatEngine.ts +++ b/packages/llamaindex/src/engines/chat/CondenseQuestionChatEngine.ts @@ -1,4 +1,9 @@ import type { ChatMessage, LLM } from "@llamaindex/core/llms"; +import { + extractText, + streamReducer, + wrapEventCaller, +} from "@llamaindex/core/utils"; import type { ChatHistory } from "../../ChatHistory.js"; import { getHistory } from "../../ChatHistory.js"; import type { EngineResponse } from "../../EngineResponse.js"; @@ -9,8 +14,6 @@ import { } from "../../Prompt.js"; import type { ServiceContext } from "../../ServiceContext.js"; import { llmFromSettingsOrContext } from "../../Settings.js"; -import { wrapEventCaller } from "../../internal/context/EventCaller.js"; -import { extractText, streamReducer } from "../../llm/utils.js"; import { PromptMixin } from "../../prompts/index.js"; import type { QueryEngine } from "../../types.js"; import type { diff --git a/packages/llamaindex/src/engines/chat/ContextChatEngine.ts b/packages/llamaindex/src/engines/chat/ContextChatEngine.ts index 6403c0595..3c78d7d45 100644 --- a/packages/llamaindex/src/engines/chat/ContextChatEngine.ts +++ b/packages/llamaindex/src/engines/chat/ContextChatEngine.ts @@ -4,18 +4,18 @@ import type { MessageContent, MessageType, } from "@llamaindex/core/llms"; +import { + extractText, + streamConverter, + streamReducer, + wrapEventCaller, +} from "@llamaindex/core/utils"; import type { ChatHistory } from "../../ChatHistory.js"; import { getHistory } from "../../ChatHistory.js"; import { EngineResponse } from "../../EngineResponse.js"; import type { ContextSystemPrompt } from "../../Prompt.js"; import type { BaseRetriever } from "../../Retriever.js"; import { Settings } from "../../Settings.js"; -import { wrapEventCaller } from "../../internal/context/EventCaller.js"; -import { - extractText, - streamConverter, - streamReducer, -} from "../../llm/utils.js"; import type { BaseNodePostprocessor } from "../../postprocessors/index.js"; import { PromptMixin } from "../../prompts/Mixin.js"; import { DefaultContextGenerator } from "./DefaultContextGenerator.js"; diff --git a/packages/llamaindex/src/engines/chat/SimpleChatEngine.ts b/packages/llamaindex/src/engines/chat/SimpleChatEngine.ts index fcb934e00..8e8cb38ba 100644 --- a/packages/llamaindex/src/engines/chat/SimpleChatEngine.ts +++ b/packages/llamaindex/src/engines/chat/SimpleChatEngine.ts @@ -1,10 +1,13 @@ import type { LLM } from "@llamaindex/core/llms"; +import { + streamConverter, 
+ streamReducer, + wrapEventCaller, +} from "@llamaindex/core/utils"; import type { ChatHistory } from "../../ChatHistory.js"; import { getHistory } from "../../ChatHistory.js"; import { EngineResponse } from "../../EngineResponse.js"; import { Settings } from "../../Settings.js"; -import { wrapEventCaller } from "../../internal/context/EventCaller.js"; -import { streamConverter, streamReducer } from "../../llm/utils.js"; import type { ChatEngine, ChatEngineParamsNonStreaming, diff --git a/packages/llamaindex/src/engines/query/RetrieverQueryEngine.ts b/packages/llamaindex/src/engines/query/RetrieverQueryEngine.ts index 86a01df44..1ab37aef0 100644 --- a/packages/llamaindex/src/engines/query/RetrieverQueryEngine.ts +++ b/packages/llamaindex/src/engines/query/RetrieverQueryEngine.ts @@ -1,9 +1,9 @@ import type { NodeWithScore } from "@llamaindex/core/schema"; +import { wrapEventCaller } from "@llamaindex/core/utils"; import type { EngineResponse } from "../../EngineResponse.js"; -import type { BaseRetriever } from "../../Retriever.js"; -import { wrapEventCaller } from "../../internal/context/EventCaller.js"; import type { BaseNodePostprocessor } from "../../postprocessors/index.js"; import { PromptMixin } from "../../prompts/Mixin.js"; +import type { BaseRetriever } from "../../Retriever.js"; import type { BaseSynthesizer } from "../../synthesizers/index.js"; import { ResponseSynthesizer } from "../../synthesizers/index.js"; import type { diff --git a/packages/llamaindex/src/engines/query/SubQuestionQueryEngine.ts b/packages/llamaindex/src/engines/query/SubQuestionQueryEngine.ts index 34f3912ef..dac3a079a 100644 --- a/packages/llamaindex/src/engines/query/SubQuestionQueryEngine.ts +++ b/packages/llamaindex/src/engines/query/SubQuestionQueryEngine.ts @@ -17,7 +17,7 @@ import type { } from "../../types.js"; import type { BaseTool, ToolMetadata } from "@llamaindex/core/llms"; -import { wrapEventCaller } from "../../internal/context/EventCaller.js"; +import { wrapEventCaller } from "@llamaindex/core/utils"; import type { BaseQuestionGenerator, SubQuestion } from "./types.js"; /** diff --git a/packages/llamaindex/src/evaluation/Correctness.ts b/packages/llamaindex/src/evaluation/Correctness.ts index acb177395..ba814bf80 100644 --- a/packages/llamaindex/src/evaluation/Correctness.ts +++ b/packages/llamaindex/src/evaluation/Correctness.ts @@ -1,6 +1,6 @@ import type { ChatMessage, LLM } from "@llamaindex/core/llms"; import { MetadataMode } from "@llamaindex/core/schema"; -import { extractText } from "../llm/utils.js"; +import { extractText } from "@llamaindex/core/utils"; import { PromptMixin } from "../prompts/Mixin.js"; import type { ServiceContext } from "../ServiceContext.js"; import { llmFromSettingsOrContext } from "../Settings.js"; diff --git a/packages/llamaindex/src/index.edge.ts b/packages/llamaindex/src/index.edge.ts index 6e060b603..95a512f16 100644 --- a/packages/llamaindex/src/index.edge.ts +++ b/packages/llamaindex/src/index.edge.ts @@ -1,7 +1,31 @@ +import type { AgentEndEvent, AgentStartEvent } from "./agent/types.js"; +import type { RetrievalEndEvent, RetrievalStartEvent } from "./llm/types.js"; + +declare module "@llamaindex/core/global" { + export interface LlamaIndexEventMaps { + "retrieve-start": RetrievalStartEvent; + "retrieve-end": RetrievalEndEvent; + // agent events + "agent-start": AgentStartEvent; + "agent-end": AgentEndEvent; + } +} + +export { CallbackManager } from "@llamaindex/core/global"; +export type { + BaseEvent, + JSONArray, + JSONObject, + JSONValue, + 
LLMEndEvent, + LLMStartEvent, + LLMStreamEvent, + LLMToolCallEvent, + LLMToolResultEvent, +} from "@llamaindex/core/global"; export * from "@llamaindex/core/llms"; export * from "@llamaindex/core/schema"; export * from "./agent/index.js"; -export * from "./callbacks/CallbackManager.js"; export * from "./ChatHistory.js"; export * from "./cloud/index.js"; export * from "./constants.js"; diff --git a/packages/llamaindex/src/indices/keyword/index.ts b/packages/llamaindex/src/indices/keyword/index.ts index 1d8f4f4b2..6bfb5da04 100644 --- a/packages/llamaindex/src/indices/keyword/index.ts +++ b/packages/llamaindex/src/indices/keyword/index.ts @@ -32,8 +32,8 @@ import { } from "./utils.js"; import type { LLM } from "@llamaindex/core/llms"; +import { extractText } from "@llamaindex/core/utils"; import { llmFromSettingsOrContext } from "../../Settings.js"; -import { extractText } from "../../llm/utils.js"; export interface KeywordIndexOptions { nodes?: BaseNode[]; diff --git a/packages/llamaindex/src/indices/summary/index.ts b/packages/llamaindex/src/indices/summary/index.ts index 93d2341dc..7800070a8 100644 --- a/packages/llamaindex/src/indices/summary/index.ts +++ b/packages/llamaindex/src/indices/summary/index.ts @@ -3,19 +3,17 @@ import type { Document, NodeWithScore, } from "@llamaindex/core/schema"; +import { extractText, wrapEventCaller } from "@llamaindex/core/utils"; import _ from "lodash"; import type { ChoiceSelectPrompt } from "../../Prompt.js"; import { defaultChoiceSelectPrompt } from "../../Prompt.js"; import type { BaseRetriever, RetrieveParams } from "../../Retriever.js"; import type { ServiceContext } from "../../ServiceContext.js"; import { - Settings, llmFromSettingsOrContext, nodeParserFromSettingsOrContext, } from "../../Settings.js"; import { RetrieverQueryEngine } from "../../engines/query/index.js"; -import { wrapEventCaller } from "../../internal/context/EventCaller.js"; -import { extractText } from "../../llm/utils.js"; import type { BaseNodePostprocessor } from "../../postprocessors/index.js"; import type { StorageContext } from "../../storage/StorageContext.js"; import { storageContextFromDefaults } from "../../storage/StorageContext.js"; @@ -296,17 +294,10 @@ export class SummaryIndexRetriever implements BaseRetriever { async retrieve({ query }: RetrieveParams): Promise<NodeWithScore[]> { const nodeIds = this.index.indexStruct.nodes; const nodes = await this.index.docStore.getNodes(nodeIds); - const result = nodes.map((node) => ({ + return nodes.map((node) => ({ node: node, score: 1, })); - - Settings.callbackManager.dispatchEvent("retrieve", { - query, - nodes: result, - }); - - return result; } } @@ -376,11 +367,6 @@ export class SummaryIndexLLMRetriever implements BaseRetriever { results.push(...nodeWithScores); } - Settings.callbackManager.dispatchEvent("retrieve", { - query, - nodes: results, - }); - return results; } } diff --git a/packages/llamaindex/src/indices/vectorStore/index.ts b/packages/llamaindex/src/indices/vectorStore/index.ts index 4eadc0d36..0404ea669 100644 --- a/packages/llamaindex/src/indices/vectorStore/index.ts +++ b/packages/llamaindex/src/indices/vectorStore/index.ts @@ -1,3 +1,4 @@ +import { Settings } from "@llamaindex/core/global"; import type { MessageContent } from "@llamaindex/core/llms"; import { ImageNode, @@ -8,6 +9,7 @@ import { type Document, type NodeWithScore, } from "@llamaindex/core/schema"; +import { wrapEventCaller } from "@llamaindex/core/utils"; import type { BaseRetriever, RetrieveParams } from "../../Retriever.js"; import 
type { ServiceContext } from "../../ServiceContext.js"; import { nodeParserFromSettingsOrContext } from "../../Settings.js"; @@ -22,8 +24,6 @@ import { DocStoreStrategy, createDocStoreStrategy, } from "../../ingestion/strategies/index.js"; -import { wrapEventCaller } from "../../internal/context/EventCaller.js"; -import { getCallbackManager } from "../../internal/settings/CallbackManager.js"; import type { BaseNodePostprocessor } from "../../postprocessors/types.js"; import type { StorageContext } from "../../storage/StorageContext.js"; import { storageContextFromDefaults } from "../../storage/StorageContext.js"; @@ -413,7 +413,7 @@ export class VectorIndexRetriever implements BaseRetriever { query, preFilters, }: RetrieveParams): Promise<NodeWithScore[]> { - getCallbackManager().dispatchEvent("retrieve-start", { + Settings.callbackManager.dispatchEvent("retrieve-start", { payload: { query, }, @@ -432,17 +432,12 @@ export class VectorIndexRetriever implements BaseRetriever { ), ); } - getCallbackManager().dispatchEvent("retrieve-end", { + Settings.callbackManager.dispatchEvent("retrieve-end", { payload: { query, nodes: nodesWithScores, }, }); - // send deprecated event - getCallbackManager().dispatchEvent("retrieve", { - query, - nodes: nodesWithScores, - }); return nodesWithScores; } diff --git a/packages/llamaindex/src/internal/settings/CallbackManager.ts b/packages/llamaindex/src/internal/settings/CallbackManager.ts deleted file mode 100644 index 55660a14b..000000000 --- a/packages/llamaindex/src/internal/settings/CallbackManager.ts +++ /dev/null @@ -1,25 +0,0 @@ -import { AsyncLocalStorage } from "@llamaindex/env"; -import { CallbackManager } from "../../callbacks/CallbackManager.js"; - -const callbackManagerAsyncLocalStorage = - new AsyncLocalStorage<CallbackManager>(); -let globalCallbackManager: CallbackManager | null = null; - -export function getCallbackManager(): CallbackManager { - if (globalCallbackManager === null) { - globalCallbackManager = new CallbackManager(); - } - - return callbackManagerAsyncLocalStorage.getStore() ?? 
globalCallbackManager; -} - -export function setCallbackManager(callbackManager: CallbackManager) { - globalCallbackManager = callbackManager; -} - -export function withCallbackManager<Result>( - callbackManager: CallbackManager, - fn: () => Result, -): Result { - return callbackManagerAsyncLocalStorage.run(callbackManager, fn); -} diff --git a/packages/llamaindex/src/internal/type.ts b/packages/llamaindex/src/internal/type.ts deleted file mode 100644 index 8421d5d1f..000000000 --- a/packages/llamaindex/src/internal/type.ts +++ /dev/null @@ -1,5 +0,0 @@ -import type { CustomEvent } from "@llamaindex/env"; - -export type BaseEvent<Payload extends Record<string, unknown>> = CustomEvent<{ - payload: Readonly<Payload>; -}>; diff --git a/packages/llamaindex/src/internal/utils.ts b/packages/llamaindex/src/internal/utils.ts index 6fe3e8166..a587290e2 100644 --- a/packages/llamaindex/src/internal/utils.ts +++ b/packages/llamaindex/src/internal/utils.ts @@ -1,4 +1,4 @@ -import type { JSONValue } from "../types.js"; +import type { JSONValue } from "@llamaindex/core/global"; export const isAsyncIterable = ( obj: unknown, diff --git a/packages/llamaindex/src/llm/anthropic.ts b/packages/llamaindex/src/llm/anthropic.ts index 19aa677cf..0e8b1b06b 100644 --- a/packages/llamaindex/src/llm/anthropic.ts +++ b/packages/llamaindex/src/llm/anthropic.ts @@ -26,10 +26,10 @@ import type { LLMChatParamsStreaming, ToolCallLLMMessageOptions, } from "@llamaindex/core/llms"; +import { ToolCallLLM } from "@llamaindex/core/llms"; +import { extractText, wrapLLMEvent } from "@llamaindex/core/utils"; import { getEnv } from "@llamaindex/env"; import _ from "lodash"; -import { ToolCallLLM } from "./base.js"; -import { extractText, wrapLLMEvent } from "./utils.js"; export class AnthropicSession { anthropic: SDKAnthropic; diff --git a/packages/llamaindex/src/llm/gemini/base.ts b/packages/llamaindex/src/llm/gemini/base.ts index 994a8d821..0b05c2848 100644 --- a/packages/llamaindex/src/llm/gemini/base.ts +++ b/packages/llamaindex/src/llm/gemini/base.ts @@ -15,9 +15,9 @@ import type { ToolCall, ToolCallLLMMessageOptions, } from "@llamaindex/core/llms"; +import { ToolCallLLM } from "@llamaindex/core/llms"; +import { streamConverter, wrapLLMEvent } from "@llamaindex/core/utils"; import { getEnv, randomUUID } from "@llamaindex/env"; -import { ToolCallLLM } from "../base.js"; -import { streamConverter, wrapLLMEvent } from "../utils.js"; import { GEMINI_BACKENDS, GEMINI_MODEL, diff --git a/packages/llamaindex/src/llm/gemini/utils.ts b/packages/llamaindex/src/llm/gemini/utils.ts index 50ad3b1bc..85dc53a36 100644 --- a/packages/llamaindex/src/llm/gemini/utils.ts +++ b/packages/llamaindex/src/llm/gemini/utils.ts @@ -15,7 +15,7 @@ import type { MessageType, ToolCallLLMMessageOptions, } from "@llamaindex/core/llms"; -import { extractDataUrlComponents } from "../utils.js"; +import { extractDataUrlComponents } from "@llamaindex/core/utils"; import type { ChatContext, FileDataPart, diff --git a/packages/llamaindex/src/llm/gemini/vertex.ts b/packages/llamaindex/src/llm/gemini/vertex.ts index 96fd6b903..a8906c0cb 100644 --- a/packages/llamaindex/src/llm/gemini/vertex.ts +++ b/packages/llamaindex/src/llm/gemini/vertex.ts @@ -19,8 +19,8 @@ import type { ToolCall, ToolCallLLMMessageOptions, } from "@llamaindex/core/llms"; +import { streamConverter } from "@llamaindex/core/utils"; import { getEnv, randomUUID } from "@llamaindex/env"; -import { streamConverter } from "../utils.js"; import { DEFAULT_SAFETY_SETTINGS, getFunctionCalls, getText } from 
"./utils.js"; /* To use Google's Vertex AI backend, it doesn't use api key authentication. diff --git a/packages/llamaindex/src/llm/huggingface.ts b/packages/llamaindex/src/llm/huggingface.ts index 5965973c3..aa9f8aa2e 100644 --- a/packages/llamaindex/src/llm/huggingface.ts +++ b/packages/llamaindex/src/llm/huggingface.ts @@ -1,21 +1,22 @@ import { HfInference } from "@huggingface/inference"; -import type { - ChatMessage, - ChatResponse, - ChatResponseChunk, - LLMChatParamsNonStreaming, - LLMChatParamsStreaming, - LLMMetadata, - ToolCallLLMMessageOptions, +import "@llamaindex/core/llms"; +import { + BaseLLM, + type ChatMessage, + type ChatResponse, + type ChatResponseChunk, + type LLMChatParamsNonStreaming, + type LLMChatParamsStreaming, + type LLMMetadata, + type ToolCallLLMMessageOptions, } from "@llamaindex/core/llms"; +import { streamConverter, wrapLLMEvent } from "@llamaindex/core/utils"; import type { PreTrainedModel, PreTrainedTokenizer, Tensor, } from "@xenova/transformers"; import { lazyLoadTransformers } from "../internal/deps/transformers.js"; -import { BaseLLM } from "./base.js"; -import { streamConverter, wrapLLMEvent } from "./utils.js"; // TODO workaround issue with @huggingface/inference@2.7.0 interface HfInferenceOptions { diff --git a/packages/llamaindex/src/llm/index.ts b/packages/llamaindex/src/llm/index.ts index fe069cedb..c5e9e30b3 100644 --- a/packages/llamaindex/src/llm/index.ts +++ b/packages/llamaindex/src/llm/index.ts @@ -4,10 +4,8 @@ export { ALL_AVAILABLE_V3_MODELS, Anthropic, } from "./anthropic.js"; -export { ToolCallLLM } from "./base.js"; export { FireworksLLM } from "./fireworks.js"; export { Gemini, GeminiSession } from "./gemini/base.js"; -export { streamConverter, streamReducer, wrapLLMEvent } from "./utils.js"; export { GEMINI_MODEL, diff --git a/packages/llamaindex/src/llm/mistral.ts b/packages/llamaindex/src/llm/mistral.ts index da7be6751..782602a74 100644 --- a/packages/llamaindex/src/llm/mistral.ts +++ b/packages/llamaindex/src/llm/mistral.ts @@ -1,14 +1,12 @@ -import type { - ChatMessage, - ChatResponse, - ChatResponseChunk, - LLMChatParamsNonStreaming, - LLMChatParamsStreaming, +import { + BaseLLM, + type ChatMessage, + type ChatResponse, + type ChatResponseChunk, + type LLMChatParamsNonStreaming, + type LLMChatParamsStreaming, } from "@llamaindex/core/llms"; import { getEnv } from "@llamaindex/env"; -import { Settings } from "../Settings.js"; -import { type StreamCallbackResponse } from "../callbacks/CallbackManager.js"; -import { BaseLLM } from "./base.js"; export const ALL_AVAILABLE_MISTRAL_MODELS = { "mistral-tiny": { contextWindow: 32000 }, @@ -123,16 +121,6 @@ export class MistralAI extends BaseLLM { if (!part.choices.length) continue; part.choices[0].index = idx_counter; - const isDone: boolean = - part.choices[0].finish_reason === "stop" ? 
true : false; - - const stream_callback: StreamCallbackResponse = { - index: idx_counter, - isDone: isDone, - token: part, - }; - - Settings.callbackManager.dispatchEvent("stream", stream_callback); idx_counter++; diff --git a/packages/llamaindex/src/llm/ollama.ts b/packages/llamaindex/src/llm/ollama.ts index 44e9ede45..1683961d2 100644 --- a/packages/llamaindex/src/llm/ollama.ts +++ b/packages/llamaindex/src/llm/ollama.ts @@ -9,6 +9,7 @@ import type { LLMCompletionParamsStreaming, LLMMetadata, } from "@llamaindex/core/llms"; +import { extractText, streamConverter } from "@llamaindex/core/utils"; import { BaseEmbedding } from "../embeddings/types.js"; import { Ollama as OllamaBase, @@ -30,7 +31,6 @@ import { type ShowResponse, type StatusResponse, } from "../internal/deps/ollama.js"; -import { extractText, streamConverter } from "./utils.js"; const messageAccessor = (part: OllamaChatResponse): ChatResponseChunk => { return { diff --git a/packages/llamaindex/src/llm/openai.ts b/packages/llamaindex/src/llm/openai.ts index 65813b882..34af8c146 100644 --- a/packages/llamaindex/src/llm/openai.ts +++ b/packages/llamaindex/src/llm/openai.ts @@ -7,19 +7,25 @@ import type { } from "openai"; import { AzureOpenAI, OpenAI as OrigOpenAI } from "openai"; -import type { - BaseTool, - ChatMessage, - ChatResponse, - ChatResponseChunk, - LLM, - LLMChatParamsNonStreaming, - LLMChatParamsStreaming, - LLMMetadata, - MessageType, - PartialToolCall, - ToolCallLLMMessageOptions, +import { + type BaseTool, + type ChatMessage, + type ChatResponse, + type ChatResponseChunk, + type LLM, + type LLMChatParamsNonStreaming, + type LLMChatParamsStreaming, + type LLMMetadata, + type MessageType, + type PartialToolCall, + ToolCallLLM, + type ToolCallLLMMessageOptions, } from "@llamaindex/core/llms"; +import { + extractText, + wrapEventCaller, + wrapLLMEvent, +} from "@llamaindex/core/utils"; import { Tokenizers } from "@llamaindex/env"; import type { ChatCompletionAssistantMessageParam, @@ -31,16 +37,12 @@ import type { ChatCompletionUserMessageParam, } from "openai/resources/chat/completions"; import type { ChatCompletionMessageParam } from "openai/resources/index.js"; -import { wrapEventCaller } from "../internal/context/EventCaller.js"; -import { getCallbackManager } from "../internal/settings/CallbackManager.js"; import type { AzureOpenAIConfig } from "./azure.js"; import { getAzureConfigFromEnv, getAzureModel, shouldUseAzure, } from "./azure.js"; -import { ToolCallLLM } from "./base.js"; -import { extractText, wrapLLMEvent } from "./utils.js"; export class OpenAISession { openai: Pick<OrigOpenAI, "chat" | "embeddings">; @@ -390,8 +392,6 @@ export class OpenAI extends ToolCallLLM<OpenAIAdditionalChatOptions> { }); // TODO: add callback to streamConverter and use streamConverter here - //Indices - let idxCounter: number = 0; // this will be used to keep track of the current tool call, make sure input are valid json object. 
let currentToolCall: PartialToolCall | null = null; const toolCallMap = new Map<string, PartialToolCall>(); @@ -428,12 +428,6 @@ export class OpenAI extends ToolCallLLM<OpenAIAdditionalChatOptions> { const isDone: boolean = choice.finish_reason !== null; - getCallbackManager().dispatchEvent("stream", { - index: idxCounter++, - isDone: isDone, - token: part, - }); - if (isDone && currentToolCall) { // for the last one, we need to emit the tool call shouldEmitToolCall = { diff --git a/packages/llamaindex/src/llm/portkey.ts b/packages/llamaindex/src/llm/portkey.ts index 3ee36bf13..b0213decb 100644 --- a/packages/llamaindex/src/llm/portkey.ts +++ b/packages/llamaindex/src/llm/portkey.ts @@ -1,20 +1,18 @@ -import type { - ChatMessage, - ChatResponse, - ChatResponseChunk, - LLMChatParamsNonStreaming, - LLMChatParamsStreaming, - LLMMetadata, - MessageType, +import { + BaseLLM, + type ChatMessage, + type ChatResponse, + type ChatResponseChunk, + type LLMChatParamsNonStreaming, + type LLMChatParamsStreaming, + type LLMMetadata, + type MessageType, } from "@llamaindex/core/llms"; +import { extractText, wrapLLMEvent } from "@llamaindex/core/utils"; import { getEnv } from "@llamaindex/env"; import _ from "lodash"; import type { LLMOptions } from "portkey-ai"; import { Portkey as OrigPortKey } from "portkey-ai"; -import { type StreamCallbackResponse } from "../callbacks/CallbackManager.js"; -import { getCallbackManager } from "../internal/settings/CallbackManager.js"; -import { BaseLLM } from "./base.js"; -import { extractText, wrapLLMEvent } from "./utils.js"; interface PortkeyOptions { apiKey?: string; @@ -136,18 +134,7 @@ export class Portkey extends BaseLLM { //Indices let idx_counter: number = 0; for await (const part of chunkStream) { - //Increment part.choices[0].index = idx_counter; - const is_done: boolean = - part.choices[0].finish_reason === "stop" ? 
true : false; - //onLLMStream Callback - - const stream_callback: StreamCallbackResponse = { - index: idx_counter, - isDone: is_done, - // token: part, - }; - getCallbackManager().dispatchEvent("stream", stream_callback); idx_counter++; diff --git a/packages/llamaindex/src/llm/replicate_ai.ts b/packages/llamaindex/src/llm/replicate_ai.ts index 091abdb78..9d83a68f0 100644 --- a/packages/llamaindex/src/llm/replicate_ai.ts +++ b/packages/llamaindex/src/llm/replicate_ai.ts @@ -1,20 +1,20 @@ -import type { - ChatMessage, - ChatResponse, - ChatResponseChunk, - LLMChatParamsNonStreaming, - LLMChatParamsStreaming, - MessageType, +import { + BaseLLM, + type ChatMessage, + type ChatResponse, + type ChatResponseChunk, + type LLMChatParamsNonStreaming, + type LLMChatParamsStreaming, + type MessageType, } from "@llamaindex/core/llms"; -import { getEnv } from "@llamaindex/env"; -import Replicate from "../internal/deps/replicate.js"; -import { BaseLLM } from "./base.js"; import { extractText, streamCallbacks, streamConverter, wrapLLMEvent, -} from "./utils.js"; +} from "@llamaindex/core/utils"; +import { getEnv } from "@llamaindex/env"; +import Replicate from "../internal/deps/replicate.js"; export class ReplicateSession { replicateKey: string | null = null; diff --git a/packages/llamaindex/src/llm/types.ts b/packages/llamaindex/src/llm/types.ts index eee81169d..d8f95c790 100644 --- a/packages/llamaindex/src/llm/types.ts +++ b/packages/llamaindex/src/llm/types.ts @@ -1,14 +1,6 @@ -import type { - ChatMessage, - ChatResponse, - ChatResponseChunk, - MessageContent, - ToolCall, - ToolOutput, -} from "@llamaindex/core/llms"; +import type { BaseEvent } from "@llamaindex/core/global"; +import type { MessageContent } from "@llamaindex/core/llms"; import type { NodeWithScore } from "@llamaindex/core/schema"; -import type { BaseEvent } from "../internal/type.js"; -import type { UUID } from "../types.js"; export type RetrievalStartEvent = BaseEvent<{ query: MessageContent; @@ -17,22 +9,3 @@ export type RetrievalEndEvent = BaseEvent<{ query: MessageContent; nodes: NodeWithScore[]; }>; -export type LLMStartEvent = BaseEvent<{ - id: UUID; - messages: ChatMessage[]; -}>; -export type LLMToolCallEvent = BaseEvent<{ - toolCall: ToolCall; -}>; -export type LLMToolResultEvent = BaseEvent<{ - toolCall: ToolCall; - toolResult: ToolOutput; -}>; -export type LLMEndEvent = BaseEvent<{ - id: UUID; - response: ChatResponse; -}>; -export type LLMStreamEvent = BaseEvent<{ - id: UUID; - chunk: ChatResponseChunk; -}>; diff --git a/packages/llamaindex/src/llm/utils.ts b/packages/llamaindex/src/llm/utils.ts deleted file mode 100644 index 6a640f368..000000000 --- a/packages/llamaindex/src/llm/utils.ts +++ /dev/null @@ -1,217 +0,0 @@ -import type { - ChatResponse, - ChatResponseChunk, - LLM, - LLMChat, - MessageContent, - MessageContentDetail, - MessageContentTextDetail, -} from "@llamaindex/core/llms"; -import type { ImageType } from "@llamaindex/core/schema"; -import { AsyncLocalStorage, randomUUID } from "@llamaindex/env"; -import { getCallbackManager } from "../internal/settings/CallbackManager.js"; - -export async function* streamConverter<S, D>( - stream: AsyncIterable<S>, - converter: (s: S) => D | null, -): AsyncIterable<D> { - for await (const data of stream) { - const newData = converter(data); - if (newData === null) { - return; - } - yield newData; - } -} - -export async function* streamCallbacks<S>( - stream: AsyncIterable<S>, - callbacks: { - finished?: (value?: S) => void; - }, -): AsyncIterable<S> { - let value: S | 
undefined; - for await (value of stream) { - yield value; - } - if (callbacks.finished) { - callbacks.finished(value); - } -} - -export async function* streamReducer<S, D>(params: { - stream: AsyncIterable<S>; - reducer: (previousValue: D, currentValue: S) => D; - initialValue: D; - finished?: (value: D) => void; -}): AsyncIterable<S> { - let value = params.initialValue; - for await (const data of params.stream) { - value = params.reducer(value, data); - yield data; - } - if (params.finished) { - params.finished(value); - } -} - -/** - * Extracts just the text from a multi-modal message or the message itself if it's just text. - * - * @param message The message to extract text from. - * @returns The extracted text - */ -export function extractText(message: MessageContent): string { - if (typeof message !== "string" && !Array.isArray(message)) { - console.warn( - "extractText called with non-MessageContent message, this is likely a bug.", - ); - return `${message}`; - } else if (typeof message !== "string" && Array.isArray(message)) { - // message is of type MessageContentDetail[] - retrieve just the text parts and concatenate them - // so we can pass them to the context generator - return message - .filter((c): c is MessageContentTextDetail => c.type === "text") - .map((c) => c.text) - .join("\n\n"); - } else { - return message; - } -} - -/** - * Extracts a single text from a multi-modal message content - * - * @param message The message to extract images from. - * @returns The extracted images - */ -export function extractSingleText( - message: MessageContentDetail, -): string | null { - if (message.type === "text") { - return message.text; - } - return null; -} - -/** - * Extracts an image from a multi-modal message content - * - * @param message The message to extract images from. 
- * @returns The extracted images - */ -export function extractImage(message: MessageContentDetail): ImageType | null { - if (message.type === "image_url") { - return new URL(message.image_url.url); - } - return null; -} - -export const extractDataUrlComponents = ( - dataUrl: string, -): { - mimeType: string; - base64: string; -} => { - const parts = dataUrl.split(";base64,"); - - if (parts.length !== 2 || !parts[0].startsWith("data:")) { - throw new Error("Invalid data URL"); - } - - const mimeType = parts[0].slice(5); - const base64 = parts[1]; - - return { - mimeType, - base64, - }; -}; - -/** - * @internal - */ -export function wrapLLMEvent< - AdditionalChatOptions extends object = object, - AdditionalMessageOptions extends object = object, ->( - originalMethod: LLMChat< - AdditionalChatOptions, - AdditionalMessageOptions - >["chat"], - _context: ClassMethodDecoratorContext, -) { - return async function withLLMEvent( - this: LLM<AdditionalChatOptions, AdditionalMessageOptions>, - ...params: Parameters< - LLMChat<AdditionalChatOptions, AdditionalMessageOptions>["chat"] - > - ): ReturnType< - LLMChat<AdditionalChatOptions, AdditionalMessageOptions>["chat"] - > { - const id = randomUUID(); - getCallbackManager().dispatchEvent("llm-start", { - payload: { - id, - messages: params[0].messages, - }, - }); - const response = await originalMethod.call(this, ...params); - if (Symbol.asyncIterator in response) { - // save snapshot to restore it after the response is done - const snapshot = AsyncLocalStorage.snapshot(); - const originalAsyncIterator = { - [Symbol.asyncIterator]: response[Symbol.asyncIterator].bind(response), - }; - response[Symbol.asyncIterator] = async function* () { - const finalResponse = { - raw: [] as ChatResponseChunk[], - message: { - content: "", - role: "assistant", - options: {}, - }, - } satisfies ChatResponse; - let firstOne = false; - for await (const chunk of originalAsyncIterator) { - if (!firstOne) { - firstOne = true; - finalResponse.message.content = chunk.delta; - } else { - finalResponse.message.content += chunk.delta; - } - if (chunk.options) { - finalResponse.message.options = { - ...finalResponse.message.options, - ...chunk.options, - }; - } - getCallbackManager().dispatchEvent("llm-stream", { - payload: { - id, - chunk, - }, - }); - finalResponse.raw.push(chunk); - yield chunk; - } - snapshot(() => { - getCallbackManager().dispatchEvent("llm-end", { - payload: { - id, - response: finalResponse, - }, - }); - }); - }; - } else { - getCallbackManager().dispatchEvent("llm-end", { - payload: { - id, - response, - }, - }); - } - return response; - }; -} diff --git a/packages/llamaindex/src/objects/base.ts b/packages/llamaindex/src/objects/base.ts index 4b962d26b..a13095d45 100644 --- a/packages/llamaindex/src/objects/base.ts +++ b/packages/llamaindex/src/objects/base.ts @@ -1,9 +1,9 @@ import type { BaseTool, MessageContent } from "@llamaindex/core/llms"; import type { BaseNode, Metadata } from "@llamaindex/core/schema"; import { TextNode } from "@llamaindex/core/schema"; +import { extractText } from "@llamaindex/core/utils"; import type { BaseRetriever } from "../Retriever.js"; import type { VectorStoreIndex } from "../indices/vectorStore/index.js"; -import { extractText } from "../llm/utils.js"; // Assuming that necessary interfaces and classes (like OT, TextNode, BaseNode, etc.) 
are defined elsewhere // Import statements (e.g., for TextNode, BaseNode) should be added based on your project's structure diff --git a/packages/llamaindex/src/postprocessors/rerankers/CohereRerank.ts b/packages/llamaindex/src/postprocessors/rerankers/CohereRerank.ts index 8a630e520..f2288364c 100644 --- a/packages/llamaindex/src/postprocessors/rerankers/CohereRerank.ts +++ b/packages/llamaindex/src/postprocessors/rerankers/CohereRerank.ts @@ -3,7 +3,7 @@ import { CohereClient } from "cohere-ai"; import type { MessageContent } from "@llamaindex/core/llms"; import type { NodeWithScore } from "@llamaindex/core/schema"; import { MetadataMode } from "@llamaindex/core/schema"; -import { extractText } from "../../llm/utils.js"; +import { extractText } from "@llamaindex/core/utils"; import type { BaseNodePostprocessor } from "../types.js"; type CohereRerankOptions = { diff --git a/packages/llamaindex/src/postprocessors/rerankers/JinaAIReranker.ts b/packages/llamaindex/src/postprocessors/rerankers/JinaAIReranker.ts index a1ffb334b..59dd3b92e 100644 --- a/packages/llamaindex/src/postprocessors/rerankers/JinaAIReranker.ts +++ b/packages/llamaindex/src/postprocessors/rerankers/JinaAIReranker.ts @@ -1,8 +1,8 @@ import type { MessageContent } from "@llamaindex/core/llms"; import type { NodeWithScore } from "@llamaindex/core/schema"; import { MetadataMode } from "@llamaindex/core/schema"; +import { extractText } from "@llamaindex/core/utils"; import { getEnv } from "@llamaindex/env"; -import { extractText } from "../../llm/utils.js"; import type { BaseNodePostprocessor } from "../types.js"; interface JinaAIRerankerResult { diff --git a/packages/llamaindex/src/synthesizers/MultiModalResponseSynthesizer.ts b/packages/llamaindex/src/synthesizers/MultiModalResponseSynthesizer.ts index 4d46af0ae..2c998a410 100644 --- a/packages/llamaindex/src/synthesizers/MultiModalResponseSynthesizer.ts +++ b/packages/llamaindex/src/synthesizers/MultiModalResponseSynthesizer.ts @@ -1,8 +1,8 @@ import { MetadataMode } from "@llamaindex/core/schema"; +import { streamConverter } from "@llamaindex/core/utils"; import { EngineResponse } from "../EngineResponse.js"; import type { ServiceContext } from "../ServiceContext.js"; import { llmFromSettingsOrContext } from "../Settings.js"; -import { streamConverter } from "../llm/utils.js"; import { PromptMixin } from "../prompts/Mixin.js"; import type { TextQaPrompt } from "./../Prompt.js"; import { defaultTextQaPrompt } from "./../Prompt.js"; diff --git a/packages/llamaindex/src/synthesizers/ResponseSynthesizer.ts b/packages/llamaindex/src/synthesizers/ResponseSynthesizer.ts index d05631939..9998efe9d 100644 --- a/packages/llamaindex/src/synthesizers/ResponseSynthesizer.ts +++ b/packages/llamaindex/src/synthesizers/ResponseSynthesizer.ts @@ -1,7 +1,7 @@ import { MetadataMode } from "@llamaindex/core/schema"; +import { streamConverter } from "@llamaindex/core/utils"; import { EngineResponse } from "../EngineResponse.js"; import type { ServiceContext } from "../ServiceContext.js"; -import { streamConverter } from "../llm/utils.js"; import { PromptMixin } from "../prompts/Mixin.js"; import type { ResponseBuilderPrompts } from "./builders.js"; import { getResponseBuilder } from "./builders.js"; diff --git a/packages/llamaindex/src/synthesizers/builders.ts b/packages/llamaindex/src/synthesizers/builders.ts index cc8147f67..901b6728a 100644 --- a/packages/llamaindex/src/synthesizers/builders.ts +++ b/packages/llamaindex/src/synthesizers/builders.ts @@ -1,5 +1,5 @@ import type { LLM } from 
"@llamaindex/core/llms"; -import { streamConverter } from "../llm/utils.js"; +import { streamConverter } from "@llamaindex/core/utils"; import type { RefinePrompt, SimplePrompt, diff --git a/packages/llamaindex/src/tools/functionTool.ts b/packages/llamaindex/src/tools/functionTool.ts index 21ec4995f..332abe608 100644 --- a/packages/llamaindex/src/tools/functionTool.ts +++ b/packages/llamaindex/src/tools/functionTool.ts @@ -1,6 +1,6 @@ +import type { JSONValue } from "@llamaindex/core/global"; import type { BaseTool, ToolMetadata } from "@llamaindex/core/llms"; import type { JSONSchemaType } from "ajv"; -import type { JSONValue } from "../types.js"; export class FunctionTool<T, R extends JSONValue | Promise<JSONValue>> implements BaseTool<T> diff --git a/packages/llamaindex/src/types.ts b/packages/llamaindex/src/types.ts index d5b28bf8d..19d697e3c 100644 --- a/packages/llamaindex/src/types.ts +++ b/packages/llamaindex/src/types.ts @@ -65,11 +65,3 @@ export class QueryBundle { } export type UUID = `${string}-${string}-${string}-${string}-${string}`; - -export type JSONValue = string | number | boolean | JSONObject | JSONArray; - -export type JSONObject = { - [key: string]: JSONValue; -}; - -type JSONArray = Array<JSONValue>; diff --git a/packages/llamaindex/tests/CallbackManager.test.ts b/packages/llamaindex/tests/CallbackManager.test.ts deleted file mode 100644 index 63f1e5fde..000000000 --- a/packages/llamaindex/tests/CallbackManager.test.ts +++ /dev/null @@ -1,168 +0,0 @@ -import { - afterAll, - beforeAll, - beforeEach, - describe, - expect, - test, - vi, -} from "vitest"; - -import { Document } from "@llamaindex/core/schema"; -import type { ServiceContext } from "llamaindex/ServiceContext"; -import { serviceContextFromDefaults } from "llamaindex/ServiceContext"; -import { Settings } from "llamaindex/Settings"; -import type { - RetrievalCallbackResponse, - StreamCallbackResponse, -} from "llamaindex/callbacks/CallbackManager"; -import { CallbackManager } from "llamaindex/callbacks/CallbackManager"; -import { OpenAIEmbedding } from "llamaindex/embeddings/index"; -import { SummaryIndex } from "llamaindex/indices/summary/index"; -import { VectorStoreIndex } from "llamaindex/indices/vectorStore/index"; -import { OpenAI } from "llamaindex/llm/openai"; -import { - ResponseSynthesizer, - SimpleResponseBuilder, -} from "llamaindex/synthesizers/index"; -import { mockEmbeddingModel, mockLlmGeneration } from "./utility/mockOpenAI.js"; - -describe("CallbackManager: onLLMStream and onRetrieve", () => { - let serviceContext: ServiceContext; - let streamCallbackData: StreamCallbackResponse[] = []; - let retrieveCallbackData: RetrievalCallbackResponse[] = []; - let document: Document; - let callbackManager: CallbackManager; - - beforeAll(async () => { - document = new Document({ text: "Author: My name is Paul Graham" }); - callbackManager = new CallbackManager({ - onLLMStream: (data) => { - streamCallbackData.push(data); - }, - onRetrieve: (data) => { - retrieveCallbackData.push(data); - }, - }); - - const languageModel = new OpenAI({ - model: "gpt-3.5-turbo", - }); - - mockLlmGeneration({ languageModel, callbackManager }); - - const embedModel = new OpenAIEmbedding(); - mockEmbeddingModel(embedModel); - - serviceContext = serviceContextFromDefaults({ - llm: languageModel, - embedModel, - }); - }); - - beforeEach(() => { - streamCallbackData = []; - retrieveCallbackData = []; - }); - - afterAll(() => { - vi.clearAllMocks(); - }); - - test("For VectorStoreIndex w/ a SimpleResponseBuilder", async () => { - 
const vectorStoreIndex = await VectorStoreIndex.fromDocuments([document], { - serviceContext, - }); - const queryEngine = vectorStoreIndex.asQueryEngine(); - const query = "What is the author's name?"; - const response = await Settings.withCallbackManager(callbackManager, () => { - return queryEngine.query({ query }); - }); - - expect(response.toString()).toBe("MOCK_TOKEN_1-MOCK_TOKEN_2"); - expect(streamCallbackData).toEqual([ - { - index: 0, - token: { - id: "id", - object: "object", - created: 1, - model: "model", - choices: expect.any(Array), - }, - }, - { - index: 1, - token: { - id: "id", - object: "object", - created: 1, - model: "model", - choices: expect.any(Array), - }, - }, - { - index: 2, - isDone: true, - }, - ]); - expect(retrieveCallbackData).toEqual([ - { - query: query, - nodes: expect.any(Array), - }, - ]); - }); - - test("For SummaryIndex w/ a SummaryIndexRetriever", async () => { - const summaryIndex = await SummaryIndex.fromDocuments([document], { - serviceContext, - }); - const responseBuilder = new SimpleResponseBuilder(serviceContext); - const responseSynthesizer = new ResponseSynthesizer({ - serviceContext: serviceContext, - responseBuilder, - }); - const queryEngine = summaryIndex.asQueryEngine({ - responseSynthesizer, - }); - const query = "What is the author's name?"; - const response = await Settings.withCallbackManager( - callbackManager, - async () => queryEngine.query({ query }), - ); - expect(response.toString()).toBe("MOCK_TOKEN_1-MOCK_TOKEN_2"); - expect(streamCallbackData).toEqual([ - { - index: 0, - token: { - id: "id", - object: "object", - created: 1, - model: "model", - choices: expect.any(Array), - }, - }, - { - index: 1, - token: { - id: "id", - object: "object", - created: 1, - model: "model", - choices: expect.any(Array), - }, - }, - { - index: 2, - isDone: true, - }, - ]); - expect(retrieveCallbackData).toEqual([ - { - query: query, - nodes: expect.any(Array), - }, - ]); - }); -}); diff --git a/packages/llamaindex/tests/utility/mockOpenAI.ts b/packages/llamaindex/tests/utility/mockOpenAI.ts index e2677bb19..102ce43b0 100644 --- a/packages/llamaindex/tests/utility/mockOpenAI.ts +++ b/packages/llamaindex/tests/utility/mockOpenAI.ts @@ -1,7 +1,7 @@ +import type { CallbackManager } from "@llamaindex/core/global"; import type { LLMChatParamsBase } from "llamaindex"; import { Settings } from "llamaindex"; -import type { CallbackManager } from "llamaindex/callbacks/CallbackManager"; -import type { OpenAIEmbedding } from "llamaindex/embeddings/index"; +import type { OpenAIEmbedding } from "llamaindex/embeddings/OpenAIEmbedding"; import { OpenAI } from "llamaindex/llm/openai"; import { vi } from "vitest"; @@ -9,12 +9,9 @@ export const DEFAULT_LLM_TEXT_OUTPUT = "MOCK_TOKEN_1-MOCK_TOKEN_2"; export function mockLlmGeneration({ languageModel, - callbackManager, }: { languageModel?: OpenAI; - callbackManager?: CallbackManager; } = {}) { - callbackManager = callbackManager || Settings.callbackManager; if (!languageModel && Settings.llm instanceof OpenAI) { languageModel = Settings.llm; } @@ -24,34 +21,6 @@ export function mockLlmGeneration({ vi.spyOn(languageModel, "chat").mockImplementation( async ({ messages }: LLMChatParamsBase) => { const text = DEFAULT_LLM_TEXT_OUTPUT; - if (callbackManager?.onLLMStream) { - const chunks = text.split("-"); - for (let i = 0; i < chunks.length; i++) { - const chunk = chunks[i]; - await callbackManager?.onLLMStream({ - index: i, - token: { - id: "id", - object: "object", - created: 1, - model: "model", - choices: [ - { - 
index: 0, - delta: { - content: chunk, - }, - finish_reason: null, - }, - ], - }, - }); - } - await callbackManager?.onLLMStream({ - index: chunks.length, - isDone: true, - }); - } return new Promise((resolve) => { resolve({ get raw() { @@ -133,34 +102,6 @@ export function mocStructuredkLlmGeneration({ vi.spyOn(languageModel, "chat").mockImplementation( async ({ messages }: LLMChatParamsBase) => { const text = structuredOutput; - if (callbackManager?.onLLMStream) { - const chunks = text.split("-"); - for (let i = 0; i < chunks.length; i++) { - const chunk = chunks[i]; - await callbackManager?.onLLMStream({ - index: i, - token: { - id: "id", - object: "object", - created: 1, - model: "model", - choices: [ - { - index: 0, - delta: { - content: chunk, - }, - finish_reason: null, - }, - ], - }, - }); - } - await callbackManager?.onLLMStream({ - index: chunks.length, - isDone: true, - }); - } return new Promise((resolve) => { resolve({ get raw() { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 174844a1f..1cbd394ea 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -134,6 +134,9 @@ importers: '@datastax/astra-db-ts': specifier: ^1.2.1 version: 1.2.1 + '@llamaindex/core': + specifier: ^0.0.3 + version: link:../packages/core '@notionhq/client': specifier: ^2.2.15 version: 2.2.15(encoding@0.1.13) @@ -346,13 +349,13 @@ importers: '@aws-sdk/client-bedrock-runtime': specifier: ^3.600.0 version: 3.602.0 + '@llamaindex/core': + specifier: workspace:* + version: link:../core + devDependencies: '@types/node': specifier: ^20.14.2 version: 20.14.2 - llamaindex: - specifier: workspace:* - version: link:../llamaindex - devDependencies: bunchee: specifier: 5.3.0-beta.0 version: 5.3.0-beta.0(typescript@5.5.2) @@ -618,6 +621,9 @@ importers: '@faker-js/faker': specifier: ^8.4.1 version: 8.4.1 + '@llamaindex/core': + specifier: workspace:* + version: link:../../core '@types/node': specifier: ^20.12.11 version: 20.14.2 @@ -11127,10 +11133,10 @@ snapshots: dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/client-sso-oidc': 3.600.0(@aws-sdk/client-sts@3.600.0) - '@aws-sdk/client-sts': 3.600.0 + '@aws-sdk/client-sso-oidc': 3.600.0 + '@aws-sdk/client-sts': 3.600.0(@aws-sdk/client-sso-oidc@3.600.0) '@aws-sdk/core': 3.598.0 - '@aws-sdk/credential-provider-node': 3.600.0(@aws-sdk/client-sso-oidc@3.600.0(@aws-sdk/client-sts@3.600.0))(@aws-sdk/client-sts@3.600.0) + '@aws-sdk/credential-provider-node': 3.600.0(@aws-sdk/client-sso-oidc@3.600.0)(@aws-sdk/client-sts@3.600.0(@aws-sdk/client-sso-oidc@3.600.0)) '@aws-sdk/middleware-host-header': 3.598.0 '@aws-sdk/middleware-logger': 3.598.0 '@aws-sdk/middleware-recursion-detection': 3.598.0 @@ -11173,13 +11179,13 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/client-sso-oidc@3.600.0(@aws-sdk/client-sts@3.600.0)': + '@aws-sdk/client-sso-oidc@3.600.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/client-sts': 3.600.0 + '@aws-sdk/client-sts': 3.600.0(@aws-sdk/client-sso-oidc@3.600.0) '@aws-sdk/core': 3.598.0 - '@aws-sdk/credential-provider-node': 3.600.0(@aws-sdk/client-sso-oidc@3.600.0(@aws-sdk/client-sts@3.600.0))(@aws-sdk/client-sts@3.600.0) + '@aws-sdk/credential-provider-node': 3.600.0(@aws-sdk/client-sso-oidc@3.600.0)(@aws-sdk/client-sts@3.600.0(@aws-sdk/client-sso-oidc@3.600.0)) '@aws-sdk/middleware-host-header': 3.598.0 '@aws-sdk/middleware-logger': 3.598.0 '@aws-sdk/middleware-recursion-detection': 3.598.0 @@ -11216,7 +11222,6 @@ snapshots: '@smithy/util-utf8': 
3.0.0 tslib: 2.6.2 transitivePeerDependencies: - - '@aws-sdk/client-sts' - aws-crt '@aws-sdk/client-sso@3.598.0': @@ -11262,13 +11267,13 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/client-sts@3.600.0': + '@aws-sdk/client-sts@3.600.0(@aws-sdk/client-sso-oidc@3.600.0)': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/client-sso-oidc': 3.600.0(@aws-sdk/client-sts@3.600.0) + '@aws-sdk/client-sso-oidc': 3.600.0 '@aws-sdk/core': 3.598.0 - '@aws-sdk/credential-provider-node': 3.600.0(@aws-sdk/client-sso-oidc@3.600.0(@aws-sdk/client-sts@3.600.0))(@aws-sdk/client-sts@3.600.0) + '@aws-sdk/credential-provider-node': 3.600.0(@aws-sdk/client-sso-oidc@3.600.0)(@aws-sdk/client-sts@3.600.0(@aws-sdk/client-sso-oidc@3.600.0)) '@aws-sdk/middleware-host-header': 3.598.0 '@aws-sdk/middleware-logger': 3.598.0 '@aws-sdk/middleware-recursion-detection': 3.598.0 @@ -11305,6 +11310,7 @@ snapshots: '@smithy/util-utf8': 3.0.0 tslib: 2.6.2 transitivePeerDependencies: + - '@aws-sdk/client-sso-oidc' - aws-crt '@aws-sdk/core@3.598.0': @@ -11336,14 +11342,14 @@ snapshots: '@smithy/util-stream': 3.0.4 tslib: 2.6.2 - '@aws-sdk/credential-provider-ini@3.598.0(@aws-sdk/client-sso-oidc@3.600.0(@aws-sdk/client-sts@3.600.0))(@aws-sdk/client-sts@3.600.0)': + '@aws-sdk/credential-provider-ini@3.598.0(@aws-sdk/client-sso-oidc@3.600.0)(@aws-sdk/client-sts@3.600.0(@aws-sdk/client-sso-oidc@3.600.0))': dependencies: - '@aws-sdk/client-sts': 3.600.0 + '@aws-sdk/client-sts': 3.600.0(@aws-sdk/client-sso-oidc@3.600.0) '@aws-sdk/credential-provider-env': 3.598.0 '@aws-sdk/credential-provider-http': 3.598.0 '@aws-sdk/credential-provider-process': 3.598.0 - '@aws-sdk/credential-provider-sso': 3.598.0(@aws-sdk/client-sso-oidc@3.600.0(@aws-sdk/client-sts@3.600.0)) - '@aws-sdk/credential-provider-web-identity': 3.598.0(@aws-sdk/client-sts@3.600.0) + '@aws-sdk/credential-provider-sso': 3.598.0(@aws-sdk/client-sso-oidc@3.600.0) + '@aws-sdk/credential-provider-web-identity': 3.598.0(@aws-sdk/client-sts@3.600.0(@aws-sdk/client-sso-oidc@3.600.0)) '@aws-sdk/types': 3.598.0 '@smithy/credential-provider-imds': 3.1.2 '@smithy/property-provider': 3.1.2 @@ -11354,14 +11360,14 @@ snapshots: - '@aws-sdk/client-sso-oidc' - aws-crt - '@aws-sdk/credential-provider-node@3.600.0(@aws-sdk/client-sso-oidc@3.600.0(@aws-sdk/client-sts@3.600.0))(@aws-sdk/client-sts@3.600.0)': + '@aws-sdk/credential-provider-node@3.600.0(@aws-sdk/client-sso-oidc@3.600.0)(@aws-sdk/client-sts@3.600.0(@aws-sdk/client-sso-oidc@3.600.0))': dependencies: '@aws-sdk/credential-provider-env': 3.598.0 '@aws-sdk/credential-provider-http': 3.598.0 - '@aws-sdk/credential-provider-ini': 3.598.0(@aws-sdk/client-sso-oidc@3.600.0(@aws-sdk/client-sts@3.600.0))(@aws-sdk/client-sts@3.600.0) + '@aws-sdk/credential-provider-ini': 3.598.0(@aws-sdk/client-sso-oidc@3.600.0)(@aws-sdk/client-sts@3.600.0(@aws-sdk/client-sso-oidc@3.600.0)) '@aws-sdk/credential-provider-process': 3.598.0 - '@aws-sdk/credential-provider-sso': 3.598.0(@aws-sdk/client-sso-oidc@3.600.0(@aws-sdk/client-sts@3.600.0)) - '@aws-sdk/credential-provider-web-identity': 3.598.0(@aws-sdk/client-sts@3.600.0) + '@aws-sdk/credential-provider-sso': 3.598.0(@aws-sdk/client-sso-oidc@3.600.0) + '@aws-sdk/credential-provider-web-identity': 3.598.0(@aws-sdk/client-sts@3.600.0(@aws-sdk/client-sso-oidc@3.600.0)) '@aws-sdk/types': 3.598.0 '@smithy/credential-provider-imds': 3.1.2 '@smithy/property-provider': 3.1.2 @@ -11381,10 +11387,10 @@ snapshots: '@smithy/types': 3.2.0 tslib: 2.6.2 - 
'@aws-sdk/credential-provider-sso@3.598.0(@aws-sdk/client-sso-oidc@3.600.0(@aws-sdk/client-sts@3.600.0))': + '@aws-sdk/credential-provider-sso@3.598.0(@aws-sdk/client-sso-oidc@3.600.0)': dependencies: '@aws-sdk/client-sso': 3.598.0 - '@aws-sdk/token-providers': 3.598.0(@aws-sdk/client-sso-oidc@3.600.0(@aws-sdk/client-sts@3.600.0)) + '@aws-sdk/token-providers': 3.598.0(@aws-sdk/client-sso-oidc@3.600.0) '@aws-sdk/types': 3.598.0 '@smithy/property-provider': 3.1.2 '@smithy/shared-ini-file-loader': 3.1.2 @@ -11394,9 +11400,9 @@ snapshots: - '@aws-sdk/client-sso-oidc' - aws-crt - '@aws-sdk/credential-provider-web-identity@3.598.0(@aws-sdk/client-sts@3.600.0)': + '@aws-sdk/credential-provider-web-identity@3.598.0(@aws-sdk/client-sts@3.600.0(@aws-sdk/client-sso-oidc@3.600.0))': dependencies: - '@aws-sdk/client-sts': 3.600.0 + '@aws-sdk/client-sts': 3.600.0(@aws-sdk/client-sso-oidc@3.600.0) '@aws-sdk/types': 3.598.0 '@smithy/property-provider': 3.1.2 '@smithy/types': 3.2.0 @@ -11439,9 +11445,9 @@ snapshots: '@smithy/util-middleware': 3.0.2 tslib: 2.6.2 - '@aws-sdk/token-providers@3.598.0(@aws-sdk/client-sso-oidc@3.600.0(@aws-sdk/client-sts@3.600.0))': + '@aws-sdk/token-providers@3.598.0(@aws-sdk/client-sso-oidc@3.600.0)': dependencies: - '@aws-sdk/client-sso-oidc': 3.600.0(@aws-sdk/client-sts@3.600.0) + '@aws-sdk/client-sso-oidc': 3.600.0 '@aws-sdk/types': 3.598.0 '@smithy/property-provider': 3.1.2 '@smithy/shared-ini-file-loader': 3.1.2 -- GitLab
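
Usage note (illustrative, not part of the patch): after this refactor the callback manager lives in @llamaindex/core and is reached through Settings.callbackManager. The legacy constructor hooks (onLLMStream, onRetrieve) and the deprecated "stream"/"retrieve" events removed above are replaced by the typed events this patch dispatches: "retrieve-start"/"retrieve-end" from VectorIndexRetriever.retrieve and "llm-start"/"llm-stream"/"llm-end" from the wrapLLMEvent decorator now exported by @llamaindex/core/utils. The sketch below is a minimal example of how downstream code can subscribe to those events; it assumes OPENAI_API_KEY is set in the environment, and the document text, query, and model name are placeholders chosen for illustration (the payload shapes follow the dispatchEvent calls in this patch).

import { Document, OpenAI, Settings, VectorStoreIndex } from "llamaindex";

// Handlers receive a CustomEvent-like object; the payload mirrors the
// dispatchEvent calls in this patch ({ query, nodes } for retrieve-end,
// { id, response } for llm-end).
Settings.callbackManager.on("retrieve-end", (event) => {
  const { nodes } = event.detail.payload;
  console.log(`retrieved ${nodes.length} nodes`);
});

Settings.callbackManager.on("llm-end", (event) => {
  const { response } = event.detail.payload;
  console.log("llm finished:", response.message.content);
});

async function main() {
  Settings.llm = new OpenAI({ model: "gpt-3.5-turbo" });
  const index = await VectorStoreIndex.fromDocuments([
    new Document({ text: "Paul Graham is an essayist and investor." }),
  ]);
  const response = await index.asQueryEngine().query({
    query: "Who is Paul Graham?",
  });
  console.log(response.toString());
}

main().catch(console.error);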