diff --git a/.changeset/few-otters-tie.md b/.changeset/few-otters-tie.md
new file mode 100644
index 0000000000000000000000000000000000000000..7f0f58611183c3524d6e6b7b97f5b687fff907e7
--- /dev/null
+++ b/.changeset/few-otters-tie.md
@@ -0,0 +1,5 @@
+---
+"llamaindex": patch
+---
+
+fix: handle `RouterQueryEngine` with string query
diff --git a/.changeset/little-bats-boil.md b/.changeset/little-bats-boil.md
new file mode 100644
index 0000000000000000000000000000000000000000..083ab8d17d5ceaadbd66c57a0f54402acf1acb12
--- /dev/null
+++ b/.changeset/little-bats-boil.md
@@ -0,0 +1,5 @@
+---
+"@llamaindex/cloud": patch
+---
+
+fix: bump version
diff --git a/packages/llamaindex/e2e/node/issue.e2e.ts b/packages/llamaindex/e2e/node/issue.e2e.ts
new file mode 100644
index 0000000000000000000000000000000000000000..4071214ef2a3bfc72679275fd6caa7816858eb1e
--- /dev/null
+++ b/packages/llamaindex/e2e/node/issue.e2e.ts
@@ -0,0 +1,67 @@
+import { LLMSingleSelector, Settings } from "llamaindex";
+import assert from "node:assert";
+import { test } from "node:test";
+import { mockLLMEvent } from "./utils.js";
+
+await test("#1177", async (t) => {
+  await mockLLMEvent(t, "#1177");
+  await t.test(async () => {
+    const selector = new LLMSingleSelector({
+      llm: Settings.llm,
+    });
+    {
+      const result = await selector.select(
+        [
+          {
+            description: "Math calculation",
+          },
+          {
+            description: "Search from google",
+          },
+        ],
+        "calculate 2 + 2",
+      );
+      assert.equal(result.selections.length, 1);
+      assert.equal(result.selections.at(0)!.index, 0);
+    }
+    {
+      const result = await selector.select(
+        [
+          {
+            description: "Math calculation",
+          },
+          {
+            description: "Search from google",
+          },
+        ],
+        {
+          query: "calculate 2 + 2",
+        },
+      );
+      assert.equal(result.selections.length, 1);
+      assert.equal(result.selections.at(0)!.index, 0);
+    }
+    {
+      const result = await selector.select(
+        [
+          {
+            description: "Math calculation",
+          },
+          {
+            description: "Search from google",
+          },
+        ],
+        {
+          query: [
+            {
+              type: "text",
+              text: "calculate 2 + 2",
+            },
+          ],
+        },
+      );
+      assert.equal(result.selections.length, 1);
+      assert.equal(result.selections.at(0)!.index, 0);
+    }
+  });
+});
diff --git a/packages/llamaindex/e2e/node/snapshot/#1177.snap b/packages/llamaindex/e2e/node/snapshot/#1177.snap
new file mode 100644
index 0000000000000000000000000000000000000000..4f7869347f389abab901404d05b32504ffd3ea0d
--- /dev/null
+++ b/packages/llamaindex/e2e/node/snapshot/#1177.snap
@@ -0,0 +1,67 @@
+{
+  "llmEventStart": [
+    {
+      "id": "PRESERVE_0",
+      "messages": [
+        {
+          "content": "Some choices are given below. It is provided in a numbered list (1 to 42), where each item in the list corresponds to a summary.\n---------------------\n(1) Math calculation(2) Search from google\n---------------------\nUsing only the choices above and not prior knowledge, return the choice that is most relevant to the question: 'calculate 2 + 2'\n\n\nThe output should be ONLY JSON formatted as a JSON instance.\n\nHere is an example:\n[\n    {\n        \"choice\": 1,\n        \"reason\": \"<insert reason for choice>\"\n    },\n    ...\n]\n",
+          "role": "user"
+        }
+      ]
+    },
+    {
+      "id": "PRESERVE_1",
+      "messages": [
+        {
+          "content": "Some choices are given below. It is provided in a numbered list (1 to 42), where each item in the list corresponds to a summary.\n---------------------\n(1) Math calculation(2) Search from google\n---------------------\nUsing only the choices above and not prior knowledge, return the choice that is most relevant to the question: 'calculate 2 + 2'\n\n\nThe output should be ONLY JSON formatted as a JSON instance.\n\nHere is an example:\n[\n    {\n        \"choice\": 1,\n        \"reason\": \"<insert reason for choice>\"\n    },\n    ...\n]\n",
+          "role": "user"
+        }
+      ]
+    },
+    {
+      "id": "PRESERVE_2",
+      "messages": [
+        {
+          "content": "Some choices are given below. It is provided in a numbered list (1 to 42), where each item in the list corresponds to a summary.\n---------------------\n(1) Math calculation(2) Search from google\n---------------------\nUsing only the choices above and not prior knowledge, return the choice that is most relevant to the question: 'calculate 2 + 2'\n\n\nThe output should be ONLY JSON formatted as a JSON instance.\n\nHere is an example:\n[\n    {\n        \"choice\": 1,\n        \"reason\": \"<insert reason for choice>\"\n    },\n    ...\n]\n",
+          "role": "user"
+        }
+      ]
+    }
+  ],
+  "llmEventEnd": [
+    {
+      "id": "PRESERVE_0",
+      "response": {
+        "raw": null,
+        "message": {
+          "content": "[\n    {\n        \"choice\": 1,\n        \"reason\": \"The question 'calculate 2 + 2' is directly asking for a math calculation, which corresponds to choice 1.\"\n    }\n]",
+          "role": "assistant",
+          "options": {}
+        }
+      }
+    },
+    {
+      "id": "PRESERVE_1",
+      "response": {
+        "raw": null,
+        "message": {
+          "content": "[\n    {\n        \"choice\": 1,\n        \"reason\": \"The question 'calculate 2 + 2' is asking for a mathematical calculation, which directly corresponds to choice 1: Math calculation.\"\n    }\n]",
+          "role": "assistant",
+          "options": {}
+        }
+      }
+    },
+    {
+      "id": "PRESERVE_2",
+      "response": {
+        "raw": null,
+        "message": {
+          "content": "[\n    {\n        \"choice\": 1,\n        \"reason\": \"The question 'calculate 2 + 2' is asking for a mathematical calculation, which directly corresponds to choice 1: Math calculation.\"\n    }\n]",
+          "role": "assistant",
+          "options": {}
+        }
+      }
+    }
+  ],
+  "llmEventStream": []
+}
\ No newline at end of file
diff --git a/packages/llamaindex/src/Settings.ts b/packages/llamaindex/src/Settings.ts
index dbd677ec5e960e953ca7296b949be40c9afddcdf..a4acce25d429fbb24a5af24d3b1daa2e4bb1f946 100644
--- a/packages/llamaindex/src/Settings.ts
+++ b/packages/llamaindex/src/Settings.ts
@@ -59,10 +59,12 @@ class GlobalSettings implements Config {
   }
 
   get llm(): LLM {
-    if (CoreSettings.llm === null) {
+    // FIXME: we might need to check for the internal error instead of using try-catch here
+    try {
+      CoreSettings.llm;
+    } catch (error) {
       CoreSettings.llm = new OpenAI();
     }
-
     return CoreSettings.llm;
   }
 
diff --git a/packages/llamaindex/src/outputParsers/selectors.ts b/packages/llamaindex/src/outputParsers/selectors.ts
index c2da7afbdc643578c3202a3e5d9136e958a9b8c6..74e59cd2537bebf3e0ccea13d91f66fdb230e7f7 100644
--- a/packages/llamaindex/src/outputParsers/selectors.ts
+++ b/packages/llamaindex/src/outputParsers/selectors.ts
@@ -12,8 +12,8 @@ const formatStr = `The output should be ONLY JSON formatted as a JSON instance.
 Here is an example:
 [
     {
-        choice: 1,
-        reason: "<insert reason for choice>"
+        "choice": 1,
+        "reason": "<insert reason for choice>"
     },
     ...
 ]
diff --git a/packages/llamaindex/src/selectors/llmSelectors.ts b/packages/llamaindex/src/selectors/llmSelectors.ts
index 38db9116a3b2bf8d5fc184b5b8d7e67517a76165..086a893e7e314a3beb508103004b18ae3ffc2d09 100644
--- a/packages/llamaindex/src/selectors/llmSelectors.ts
+++ b/packages/llamaindex/src/selectors/llmSelectors.ts
@@ -159,7 +159,7 @@ export class LLMSingleSelector extends BaseSelector {
     const prompt = this.prompt.format({
       numChoices: `${choicesText.length}`,
       context: choicesText,
-      query: extractText(query.query),
+      query: extractText(query),
     });
 
     const formattedPrompt = this.outputParser.format(prompt);