Skip to content
Snippets Groups Projects
Commit 48103647 authored by Kieran Simkin's avatar Kieran Simkin Committed by Alex Yang
Browse files

fix: handle `RouterQueryEngine` with string query (#1181)

parent 2dcad52d
No related branches found
No related tags found
No related merge requests found
---
"llamaindex": patch
---
fix: handle `RouterQueryEngine` with string query
---
"@llamaindex/cloud": patch
---
fix: bump version
import { LLMSingleSelector, Settings } from "llamaindex";
import assert from "node:assert";
import { test } from "node:test";
import { mockLLMEvent } from "./utils.js";
// Regression test for #1177: LLMSingleSelector.select must accept the query
// in every supported shape — a bare string, a { query: string } object, and a
// { query: MessageContentDetail[] } object — and pick the same tool each time.
await test("#1177", async (t) => {
  await mockLLMEvent(t, "#1177");
  await t.test(async () => {
    const selector = new LLMSingleSelector({
      llm: Settings.llm,
    });
    // Build a fresh choice list per call so no invocation can observe
    // mutation left behind by a previous one.
    const makeChoices = () => [
      {
        description: "Math calculation",
      },
      {
        description: "Search from google",
      },
    ];
    // The same question expressed in each supported query shape, in the
    // same order as the original three stanzas (the mock replays in order).
    const queryShapes = [
      "calculate 2 + 2",
      {
        query: "calculate 2 + 2",
      },
      {
        query: [
          {
            type: "text",
            text: "calculate 2 + 2",
          },
        ],
      },
    ];
    for (const query of queryShapes) {
      const result = await selector.select(makeChoices(), query);
      // Exactly one selection, pointing at the math tool (index 0).
      assert.equal(result.selections.length, 1);
      assert.equal(result.selections.at(0)!.index, 0);
    }
  });
});
{
"llmEventStart": [
{
"id": "PRESERVE_0",
"messages": [
{
"content": "Some choices are given below. It is provided in a numbered list (1 to 42), where each item in the list corresponds to a summary.\n---------------------\n(1) Math calculation(2) Search from google\n---------------------\nUsing only the choices above and not prior knowledge, return the choice that is most relevant to the question: 'calculate 2 + 2'\n\n\nThe output should be ONLY JSON formatted as a JSON instance.\n\nHere is an example:\n[\n {\n \"choice\": 1,\n \"reason\": \"<insert reason for choice>\"\n },\n ...\n]\n",
"role": "user"
}
]
},
{
"id": "PRESERVE_1",
"messages": [
{
"content": "Some choices are given below. It is provided in a numbered list (1 to 42), where each item in the list corresponds to a summary.\n---------------------\n(1) Math calculation(2) Search from google\n---------------------\nUsing only the choices above and not prior knowledge, return the choice that is most relevant to the question: 'calculate 2 + 2'\n\n\nThe output should be ONLY JSON formatted as a JSON instance.\n\nHere is an example:\n[\n {\n \"choice\": 1,\n \"reason\": \"<insert reason for choice>\"\n },\n ...\n]\n",
"role": "user"
}
]
},
{
"id": "PRESERVE_2",
"messages": [
{
"content": "Some choices are given below. It is provided in a numbered list (1 to 42), where each item in the list corresponds to a summary.\n---------------------\n(1) Math calculation(2) Search from google\n---------------------\nUsing only the choices above and not prior knowledge, return the choice that is most relevant to the question: 'calculate 2 + 2'\n\n\nThe output should be ONLY JSON formatted as a JSON instance.\n\nHere is an example:\n[\n {\n \"choice\": 1,\n \"reason\": \"<insert reason for choice>\"\n },\n ...\n]\n",
"role": "user"
}
]
}
],
"llmEventEnd": [
{
"id": "PRESERVE_0",
"response": {
"raw": null,
"message": {
"content": "[\n {\n \"choice\": 1,\n \"reason\": \"The question 'calculate 2 + 2' is directly asking for a math calculation, which corresponds to choice 1.\"\n }\n]",
"role": "assistant",
"options": {}
}
}
},
{
"id": "PRESERVE_1",
"response": {
"raw": null,
"message": {
"content": "[\n {\n \"choice\": 1,\n \"reason\": \"The question 'calculate 2 + 2' is asking for a mathematical calculation, which directly corresponds to choice 1: Math calculation.\"\n }\n]",
"role": "assistant",
"options": {}
}
}
},
{
"id": "PRESERVE_2",
"response": {
"raw": null,
"message": {
"content": "[\n {\n \"choice\": 1,\n \"reason\": \"The question 'calculate 2 + 2' is asking for a mathematical calculation, which directly corresponds to choice 1: Math calculation.\"\n }\n]",
"role": "assistant",
"options": {}
}
}
}
],
"llmEventStream": []
}
\ No newline at end of file
...@@ -59,10 +59,12 @@ class GlobalSettings implements Config { ...@@ -59,10 +59,12 @@ class GlobalSettings implements Config {
} }
get llm(): LLM { get llm(): LLM {
if (CoreSettings.llm === null) { // fixme: we might need check internal error instead of try-catch here
try {
CoreSettings.llm;
} catch (error) {
CoreSettings.llm = new OpenAI(); CoreSettings.llm = new OpenAI();
} }
return CoreSettings.llm; return CoreSettings.llm;
} }
......
...@@ -12,8 +12,8 @@ const formatStr = `The output should be ONLY JSON formatted as a JSON instance. ...@@ -12,8 +12,8 @@ const formatStr = `The output should be ONLY JSON formatted as a JSON instance.
Here is an example: Here is an example:
[ [
{ {
choice: 1, "choice": 1,
reason: "<insert reason for choice>" "reason": "<insert reason for choice>"
}, },
... ...
] ]
......
...@@ -159,7 +159,7 @@ export class LLMSingleSelector extends BaseSelector { ...@@ -159,7 +159,7 @@ export class LLMSingleSelector extends BaseSelector {
const prompt = this.prompt.format({ const prompt = this.prompt.format({
numChoices: `${choicesText.length}`, numChoices: `${choicesText.length}`,
context: choicesText, context: choicesText,
query: extractText(query.query), query: extractText(query),
}); });
const formattedPrompt = this.outputParser.format(prompt); const formattedPrompt = this.outputParser.format(prompt);
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment