Skip to content
Snippets Groups Projects
Unverified Commit 96f72ad8 authored by irevived1, committed by GitHub
Browse files

fix: openai streaming with token usage and finish_reason (#1265)


Co-authored-by: Alex Yang <himself65@outlook.com>
parent f3556c01
No related branches found
No related tags found
No related merge requests found
---
"@llamaindex/openai": patch
---
fix: openai streaming with token usage and finish_reason
......@@ -425,10 +425,25 @@ export class OpenAI extends ToolCallLLM<OpenAIAdditionalChatOptions> {
let currentToolCall: PartialToolCall | null = null;
const toolCallMap = new Map<string, PartialToolCall>();
for await (const part of stream) {
if (part.choices.length === 0) continue;
if (part.choices.length === 0) {
if (part.usage) {
yield {
raw: part,
delta: "",
};
}
continue;
}
const choice = part.choices[0]!;
// skip parts that don't have any content
if (!(choice.delta.content || choice.delta.tool_calls)) continue;
if (
!(
choice.delta.content ||
choice.delta.tool_calls ||
choice.finish_reason
)
)
continue;
let shouldEmitToolCall: PartialToolCall | null = null;
if (
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment