From 9c73f0a53036410c878b8bb4834a6a45499001bb Mon Sep 17 00:00:00 2001
From: Alex Yang <himself65@outlook.com>
Date: Thu, 31 Oct 2024 11:06:14 -0700
Subject: [PATCH] fix(core): async local storage in `Settings.with*` APIs (#1413)

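The `Settings.with*` helpers scope a setting to a callback through
`AsyncLocalStorage`, but the getters consulted the global value first:

    globalChunkSize ?? chunkSizeAsyncLocalStorage.getStore()

`globalChunkSize` and `globalTokenizer` are initialized to non-nullish
defaults (`1024` and `tokenizers.tokenizer()`), so the left-hand side of
`??` always won and the scoped value was unreachable. `getLLM` had the
same ordering problem; it only surfaced once `Settings.llm` was assigned
globally (as the docs instruct), after which `withLLM` was silently
ignored. This patch flips the nullish-coalescing order so the
async-local-storage value takes precedence and the global serves only as
the fallback.

A minimal sketch of the failure mode, using the same Node API but
illustrative names rather than the exact source:

    import { AsyncLocalStorage } from "node:async_hooks";

    const chunkSizeStorage = new AsyncLocalStorage<number | undefined>();
    let globalChunkSize = 1024;

    // Before: globalChunkSize is always 1024 (never nullish), so `??`
    // short-circuits and the scoped value can never be observed.
    const getBefore = () => globalChunkSize ?? chunkSizeStorage.getStore();

    // After: prefer the value scoped by run(); fall back to the global.
    const getAfter = () => chunkSizeStorage.getStore() ?? globalChunkSize;

    // The with* helpers presumably wrap the callback in a run() like this:
    chunkSizeStorage.run(256, () => {
      console.log(getBefore()); // 1024 -- the scoped 256 is ignored
      console.log(getAfter()); // 256
    });

The new unit test covers both the core `Settings.withLLM` entry point and
its re-export from `llamaindex`, asserting that the scoped LLM is visible
inside the callback from either import path.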
---
 .changeset/tall-badgers-melt.md               |  5 ++++
 .../core/src/global/settings/chunk-size.ts    |  2 +-
 packages/core/src/global/settings/llm.ts      |  2 +-
 .../core/src/global/settings/tokenizer.ts     |  2 +-
 pnpm-lock.yaml                                |  6 +++++
 unit/package.json                             |  2 ++
 unit/settings/global.test.ts                  | 24 +++++++++++++++++++
 7 files changed, 40 insertions(+), 3 deletions(-)
 create mode 100644 .changeset/tall-badgers-melt.md
 create mode 100644 unit/settings/global.test.ts

diff --git a/.changeset/tall-badgers-melt.md b/.changeset/tall-badgers-melt.md
new file mode 100644
index 000000000..e3f9b572d
--- /dev/null
+++ b/.changeset/tall-badgers-melt.md
@@ -0,0 +1,5 @@
+---
+"@llamaindex/core": patch
+---
+
+fix: async local storage in `Settings.with*` APIs
diff --git a/packages/core/src/global/settings/chunk-size.ts b/packages/core/src/global/settings/chunk-size.ts
index 6d24017f4..4ccb1f987 100644
--- a/packages/core/src/global/settings/chunk-size.ts
+++ b/packages/core/src/global/settings/chunk-size.ts
@@ -4,7 +4,7 @@ const chunkSizeAsyncLocalStorage = new AsyncLocalStorage<number | undefined>();
 let globalChunkSize: number = 1024;
 
 export function getChunkSize(): number {
-  return globalChunkSize ?? chunkSizeAsyncLocalStorage.getStore();
+  return chunkSizeAsyncLocalStorage.getStore() ?? globalChunkSize;
 }
 
 export function setChunkSize(chunkSize: number | undefined) {
diff --git a/packages/core/src/global/settings/llm.ts b/packages/core/src/global/settings/llm.ts
index 9309b3255..1f4716415 100644
--- a/packages/core/src/global/settings/llm.ts
+++ b/packages/core/src/global/settings/llm.ts
@@ -5,7 +5,7 @@ const llmAsyncLocalStorage = new AsyncLocalStorage<LLM>();
 let globalLLM: LLM | undefined;
 
 export function getLLM(): LLM {
-  const currentLLM = globalLLM ?? llmAsyncLocalStorage.getStore();
+  const currentLLM = llmAsyncLocalStorage.getStore() ?? globalLLM;
   if (!currentLLM) {
     throw new Error(
       "Cannot find LLM, please set `Settings.llm = ...` on the top of your code",
diff --git a/packages/core/src/global/settings/tokenizer.ts b/packages/core/src/global/settings/tokenizer.ts
index dae154c83..bdb6e9943 100644
--- a/packages/core/src/global/settings/tokenizer.ts
+++ b/packages/core/src/global/settings/tokenizer.ts
@@ -4,7 +4,7 @@ const chunkSizeAsyncLocalStorage = new AsyncLocalStorage<Tokenizer>();
 let globalTokenizer: Tokenizer = tokenizers.tokenizer();
 
 export function getTokenizer(): Tokenizer {
-  return globalTokenizer ?? chunkSizeAsyncLocalStorage.getStore();
+  return chunkSizeAsyncLocalStorage.getStore() ?? globalTokenizer;
 }
 
 export function setTokenizer(tokenizer: Tokenizer | undefined) {
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 46bfc0330..6807df6d6 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -1324,6 +1324,12 @@ importers:
       '@llamaindex/cloud':
         specifier: workspace:*
         version: link:../packages/cloud
+      '@llamaindex/core':
+        specifier: workspace:*
+        version: link:../packages/core
+      '@llamaindex/openai':
+        specifier: workspace:*
+        version: link:../packages/providers/openai
       '@llamaindex/readers':
         specifier: workspace:*
         version: link:../packages/readers
diff --git a/unit/package.json b/unit/package.json
index ecafc9b39..f9aabd7cb 100644
--- a/unit/package.json
+++ b/unit/package.json
@@ -13,6 +13,8 @@
   },
   "dependencies": {
     "@llamaindex/cloud": "workspace:*",
+    "@llamaindex/core": "workspace:*",
+    "@llamaindex/openai": "workspace:*",
     "@llamaindex/readers": "workspace:*",
     "llamaindex": "workspace:*"
   }
diff --git a/unit/settings/global.test.ts b/unit/settings/global.test.ts
new file mode 100644
index 000000000..8e33d7bbd
--- /dev/null
+++ b/unit/settings/global.test.ts
@@ -0,0 +1,24 @@
+import { Settings as RootSettings } from "@llamaindex/core/global";
+import { OpenAI } from "@llamaindex/openai";
+import { Settings } from "llamaindex";
+import { beforeEach, expect, test } from "vitest";
+const defaultLLM = new OpenAI();
+
+beforeEach(() => {
+  RootSettings.llm = defaultLLM;
+});
+
+test("async local storage with core", () => {
+  const symbol = Symbol("llm");
+  RootSettings.withLLM(symbol as never, () => {
+    expect(RootSettings.llm).toBe(symbol);
+  });
+});
+
+test("async local storage with llamaindex", () => {
+  const symbol = Symbol("llm");
+  Settings.withLLM(symbol as never, () => {
+    expect(Settings.llm).toBe(symbol);
+    expect(RootSettings.llm).toBe(symbol);
+  });
+});
-- 
GitLab