From 96f72ad86e9612b46b5743bbe2c4812a5e9ac9b4 Mon Sep 17 00:00:00 2001
From: irevived1 <18597212+irevived1@users.noreply.github.com>
Date: Thu, 26 Sep 2024 20:51:59 -0400
Subject: [PATCH] fix: openai streaming with token usage and finish_reason
 (#1265)

Co-authored-by: Alex Yang <himself65@outlook.com>
---
 .changeset/nine-carpets-jam.md |  5 +++++
 packages/llm/openai/src/llm.ts | 19 +++++++++++++++++--
 2 files changed, 22 insertions(+), 2 deletions(-)
 create mode 100644 .changeset/nine-carpets-jam.md

diff --git a/.changeset/nine-carpets-jam.md b/.changeset/nine-carpets-jam.md
new file mode 100644
index 000000000..67e2b2483
--- /dev/null
+++ b/.changeset/nine-carpets-jam.md
@@ -0,0 +1,5 @@
+---
+"@llamaindex/openai": patch
+---
+
+fix: openai streaming with token usage and finish_reason
diff --git a/packages/llm/openai/src/llm.ts b/packages/llm/openai/src/llm.ts
index bcd6e494f..a10f658db 100644
--- a/packages/llm/openai/src/llm.ts
+++ b/packages/llm/openai/src/llm.ts
@@ -425,10 +425,25 @@ export class OpenAI extends ToolCallLLM<OpenAIAdditionalChatOptions> {
     let currentToolCall: PartialToolCall | null = null;
     const toolCallMap = new Map<string, PartialToolCall>();
     for await (const part of stream) {
-      if (part.choices.length === 0) continue;
+      if (part.choices.length === 0) {
+        if (part.usage) {
+          yield {
+            raw: part,
+            delta: "",
+          };
+        }
+        continue;
+      }
       const choice = part.choices[0]!;
       // skip parts that don't have any content
-      if (!(choice.delta.content || choice.delta.tool_calls)) continue;
+      if (
+        !(
+          choice.delta.content ||
+          choice.delta.tool_calls ||
+          choice.finish_reason
+        )
+      )
+        continue;
 
       let shouldEmitToolCall: PartialToolCall | null = null;
       if (
-- 
GitLab
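
Consumer-side note (not part of the patch; git am ignores text after the
signature separator): with this fix, the usage-only chunk that OpenAI appends
when `stream_options: { include_usage: true }` is set (a part whose `choices`
array is empty) is yielded with an empty delta instead of being dropped, so
callers can read token counts from `raw`; parts carrying only a
`finish_reason` are likewise no longer skipped. A minimal sketch of reading
usage from the stream, assuming the `additionalChatOptions` constructor
option forwards `stream_options` and that `chat({ ..., stream: true })`
returns an async iterable of chunks; exact option names may differ by
version:

  import { OpenAI } from "@llamaindex/openai";

  const llm = new OpenAI({
    model: "gpt-4o-mini", // illustrative model name
    additionalChatOptions: {
      // ask OpenAI to append a final usage-only chunk to the stream
      stream_options: { include_usage: true },
    },
  });

  const stream = await llm.chat({
    messages: [{ role: "user", content: "Say hello." }],
    stream: true,
  });

  for await (const chunk of stream) {
    // Content deltas print normally; the usage-only chunk has delta === "".
    process.stdout.write(chunk.delta);
    // The patched loop yields the usage-only part with its raw payload
    // attached, so usage can be read off `raw` when present.
    const usage = (chunk.raw as { usage?: { total_tokens?: number } })?.usage;
    if (usage) {
      console.log("\ntotal tokens:", usage.total_tokens);
    }
  }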