Skip to content
Snippets Groups Projects
Unverified Commit d952e68e authored by Gunnar Holwerda's avatar Gunnar Holwerda Committed by GitHub
Browse files

Fix refine synthesizer empty source nodes behavior (#1677)

parent 5c026e83
No related branches found
No related tags found
No related merge requests found
---
"@llamaindex/core": patch
---
Refine synthesizer will now return an empty string as the response if an empty array of source nodes was provided. Previously, it would throw an internal error when converting undefined to a ReadableStream.
...@@ -116,7 +116,14 @@ class Refine extends BaseSynthesizer { ...@@ -116,7 +116,14 @@ class Refine extends BaseSynthesizer {
} }
} }
// fixme: no source nodes provided, cannot fix right now due to lack of context if (response === undefined) {
response = stream
? (async function* () {
yield "";
})()
: "";
}
if (typeof response === "string") { if (typeof response === "string") {
return EngineResponse.fromResponse(response, false, nodes); return EngineResponse.fromResponse(response, false, nodes);
} else { } else {
......
...@@ -66,9 +66,9 @@ describe("ChatMemoryBuffer", () => { ...@@ -66,9 +66,9 @@ describe("ChatMemoryBuffer", () => {
expect(result).toEqual([...inputMessages, ...storedMessages]); expect(result).toEqual([...inputMessages, ...storedMessages]);
}); });
test("getMessages throws error when initial token count exceeds limit", () => { test("getMessages throws error when initial token count exceeds limit", async () => {
const buffer = new ChatMemoryBuffer({ tokenLimit: 10 }); const buffer = new ChatMemoryBuffer({ tokenLimit: 10 });
expect(async () => buffer.getMessages(undefined, 20)).rejects.toThrow( await expect(async () => buffer.getMessages(undefined, 20)).rejects.toThrow(
"Initial token count exceeds token limit", "Initial token count exceeds token limit",
); );
}); });
......
import { describe, expect, test, vi } from "vitest"; import { beforeEach, describe, expect, test, vi } from "vitest";
import type { LLMMetadata } from "../../llms/dist/index.js"; import type { LLMMetadata } from "../../llms/dist/index.js";
import { getResponseSynthesizer } from "../../response-synthesizers/dist/index.js"; import { getResponseSynthesizer } from "../../response-synthesizers/dist/index.js";
import { Document } from "../../schema/dist/index.js"; import { Document } from "../../schema/dist/index.js";
...@@ -10,26 +10,69 @@ const mockLllm = () => ({ ...@@ -10,26 +10,69 @@ const mockLllm = () => ({
return response; return response;
} }
function* gen() { return {
// yield a few times to make sure each chunk has the sourceNodes [Symbol.asyncIterator]: function* gen() {
yield response; // yield a few times to make sure each chunk has the sourceNodes
yield response; yield response;
yield response; yield response;
} yield response;
},
return gen(); };
}), }),
chat: vi.fn(), chat: vi.fn(),
metadata: {} as unknown as LLMMetadata, metadata: {} as unknown as LLMMetadata,
}); });
// Tests for the refine synthesizer's behavior when no source nodes are
// provided: streaming calls must yield a single empty chunk, non-streaming
// calls must return an empty-content response (regression test for the
// "undefined to ReadableStream" crash).
describe("refine response synthesizer", () => {
  let synthesizer: ReturnType<typeof getResponseSynthesizer<"refine">>;

  // An object follows the async-iteration protocol iff it exposes a
  // Symbol.asyncIterator member. Narrowing from `unknown` keeps this
  // helper type-safe and avoids the eslint-disable that `any` required;
  // it also no longer throws when given null/undefined.
  const isAsyncIterable = (obj: unknown): boolean =>
    typeof obj === "object" && obj !== null && Symbol.asyncIterator in obj;

  beforeEach(() => {
    // Fresh synthesizer per test so the mocked LLM call counts don't leak
    // between cases.
    synthesizer = getResponseSynthesizer("refine", {
      llm: mockLllm(),
    });
  });

  describe("getResponse", () => {
    test("should return async iterable of EngineResponse when stream is true and sourceNodes are empty", async () => {
      const response = await synthesizer.getResponse(
        "unimportant query",
        [],
        true,
      );

      expect(isAsyncIterable(response)).toBe(true);

      // The empty-source stream is expected to yield exactly empty-string
      // content chunks.
      for await (const chunk of response) {
        expect(chunk.message.content).toEqual("");
      }
    });

    test("should return non async iterable when stream is false and sourceNodes are empty", async () => {
      const response = await synthesizer.getResponse(
        "unimportant query",
        [],
        false,
      );

      expect(isAsyncIterable(response)).toBe(false);
      expect(response.message.content).toEqual("");
    });
  });
});
describe("compact and refine response synthesizer", () => { describe("compact and refine response synthesizer", () => {
let synthesizer: ReturnType<typeof getResponseSynthesizer<"compact">>;
beforeEach(() => {
synthesizer = getResponseSynthesizer("compact", {
llm: mockLllm(),
});
});
describe("synthesize", () => { describe("synthesize", () => {
test("should return original sourceNodes with response when stream = false", async () => { test("should return original sourceNodes with response when stream = false", async () => {
const synthesizer = getResponseSynthesizer("compact", {
llm: mockLllm(),
});
const sourceNode = { node: new Document({}), score: 1 }; const sourceNode = { node: new Document({}), score: 1 };
const response = await synthesizer.synthesize( const response = await synthesizer.synthesize(
...@@ -44,10 +87,6 @@ describe("compact and refine response synthesizer", () => { ...@@ -44,10 +87,6 @@ describe("compact and refine response synthesizer", () => {
}); });
test("should return original sourceNodes with response when stream = true", async () => { test("should return original sourceNodes with response when stream = true", async () => {
const synthesizer = getResponseSynthesizer("compact", {
llm: mockLllm(),
});
const sourceNode = { node: new Document({}), score: 1 }; const sourceNode = { node: new Document({}), score: 1 };
const response = await synthesizer.synthesize( const response = await synthesizer.synthesize(
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment