diff --git a/.changeset/tall-badgers-melt.md b/.changeset/tall-badgers-melt.md
new file mode 100644
index 0000000000000000000000000000000000000000..e3f9b572d4368fe2052ad5672714b227599df8b1
--- /dev/null
+++ b/.changeset/tall-badgers-melt.md
@@ -0,0 +1,5 @@
+---
+"@llamaindex/core": patch
+---
+
+fix: async local storage in the `Settings.with*` APIs
diff --git a/packages/core/src/global/settings/chunk-size.ts b/packages/core/src/global/settings/chunk-size.ts
index 6d24017f43e9de5e20a646118cdec237643679b6..4ccb1f987d97afa0d6132873aff47fc814574b22 100644
--- a/packages/core/src/global/settings/chunk-size.ts
+++ b/packages/core/src/global/settings/chunk-size.ts
@@ -4,7 +4,7 @@ const chunkSizeAsyncLocalStorage = new AsyncLocalStorage<number | undefined>();
 let globalChunkSize: number = 1024;
 
 export function getChunkSize(): number {
-  return globalChunkSize ?? chunkSizeAsyncLocalStorage.getStore();
+  return chunkSizeAsyncLocalStorage.getStore() ?? globalChunkSize;
 }
 
 export function setChunkSize(chunkSize: number | undefined) {
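
The inverted `??` is the heart of the patch: `globalChunkSize` is initialized to `1024`, so the old expression `globalChunkSize ?? chunkSizeAsyncLocalStorage.getStore()` short-circuits on the global and the async-local store is never consulted. A minimal standalone sketch of both orderings (plain Node `AsyncLocalStorage`; the names are illustrative, not from the package):

```ts
import { AsyncLocalStorage } from "node:async_hooks";

const store = new AsyncLocalStorage<number>();
let globalDefault: number = 1024; // plays the role of `globalChunkSize`

// Old order: the always-set global short-circuits `??`,
// so a scoped override is silently ignored.
const getOld = () => globalDefault ?? store.getStore();
// Fixed order: the scoped value wins, with the global as fallback.
const getNew = () => store.getStore() ?? globalDefault;

store.run(256, () => {
  console.log(getOld()); // 1024: the override is never observed
  console.log(getNew()); // 256:  the override is respected
});
console.log(getNew()); // 1024: back to the global outside the scope
```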
diff --git a/packages/core/src/global/settings/llm.ts b/packages/core/src/global/settings/llm.ts
index 9309b3255c6b2f4e1d35abc93dc2ec4f58768fb9..1f471641541a559a981ddd62b1cf585d24d79381 100644
--- a/packages/core/src/global/settings/llm.ts
+++ b/packages/core/src/global/settings/llm.ts
@@ -5,7 +5,7 @@ const llmAsyncLocalStorage = new AsyncLocalStorage<LLM>();
 let globalLLM: LLM | undefined;
 
 export function getLLM(): LLM {
-  const currentLLM = globalLLM ?? llmAsyncLocalStorage.getStore();
+  const currentLLM = llmAsyncLocalStorage.getStore() ?? globalLLM;
   if (!currentLLM) {
     throw new Error(
       "Cannot find LLM, please set `Settings.llm = ...` on the top of your code",
diff --git a/packages/core/src/global/settings/tokenizer.ts b/packages/core/src/global/settings/tokenizer.ts
index dae154c83ff5fd2ca106aabe39cbd9f1f32fdbc9..bdb6e9943e1d286af7bb5576f55fbb3c1dc85c05 100644
--- a/packages/core/src/global/settings/tokenizer.ts
+++ b/packages/core/src/global/settings/tokenizer.ts
@@ -4,7 +4,7 @@ const chunkSizeAsyncLocalStorage = new AsyncLocalStorage<Tokenizer>();
 let globalTokenizer: Tokenizer = tokenizers.tokenizer();
 
 export function getTokenizer(): Tokenizer {
-  return globalTokenizer ?? chunkSizeAsyncLocalStorage.getStore();
+  return chunkSizeAsyncLocalStorage.getStore() ?? globalTokenizer;
 }
 
 export function setTokenizer(tokenizer: Tokenizer | undefined) {
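
The tokenizer getter receives the identical inversion (note the async-local variable is still named `chunkSizeAsyncLocalStorage`, a leftover from copying chunk-size.ts; the patch leaves the name alone). Since all three settings now follow one pattern, a hypothetical consolidation, not part of this patch, could look like:

```ts
import { AsyncLocalStorage } from "node:async_hooks";

// Hypothetical helper unifying the repeated scoped-setting pattern:
// scoped value first, module-level global as the fallback.
function makeScopedSetting<T>(initial: T) {
  const storage = new AsyncLocalStorage<T>();
  let globalValue = initial;
  return {
    get: (): T => storage.getStore() ?? globalValue,
    set: (value: T) => {
      globalValue = value;
    },
    with: <R>(value: T, fn: () => R): R => storage.run(value, fn),
  };
}

const chunkSize = makeScopedSetting(1024);
chunkSize.with(512, () => console.log(chunkSize.get())); // 512
console.log(chunkSize.get()); // 1024
```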
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 46bfc0330e8469f56b205e872bfed92ae9e2a431..6807df6d68aec6a21567742a623f75227c37e815 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -1324,6 +1324,12 @@ importers:
       '@llamaindex/cloud':
         specifier: workspace:*
         version: link:../packages/cloud
+      '@llamaindex/core':
+        specifier: workspace:*
+        version: link:../packages/core
+      '@llamaindex/openai':
+        specifier: workspace:*
+        version: link:../packages/providers/openai
       '@llamaindex/readers':
         specifier: workspace:*
         version: link:../packages/readers
diff --git a/unit/package.json b/unit/package.json
index ecafc9b392e0911a3c1ce5c8c9f8b093a9171789..f9aabd7cb720c48e013ef170ca930b7496e7e465 100644
--- a/unit/package.json
+++ b/unit/package.json
@@ -13,6 +13,8 @@
   },
   "dependencies": {
     "@llamaindex/cloud": "workspace:*",
+    "@llamaindex/core": "workspace:*",
+    "@llamaindex/openai": "workspace:*",
     "@llamaindex/readers": "workspace:*",
     "llamaindex": "workspace:*"
   }
diff --git a/unit/settings/global.test.ts b/unit/settings/global.test.ts
new file mode 100644
index 0000000000000000000000000000000000000000..8e33d7bbd9228ecb3fdaf2beaa4d19d3ff067a6d
--- /dev/null
+++ b/unit/settings/global.test.ts
@@ -0,0 +1,26 @@
+import { Settings as RootSettings } from "@llamaindex/core/global";
+import { OpenAI } from "@llamaindex/openai";
+import { Settings } from "llamaindex";
+import { beforeEach, expect, test } from "vitest";
+const defaultLLM = new OpenAI();
+
+beforeEach(() => {
+  RootSettings.llm = defaultLLM;
+});
+
+test("async local storage with core", () => {
+  const symbol = Symbol("llm");
+  RootSettings.withLLM(symbol as never, () => {
+    expect(RootSettings.llm).toBe(symbol);
+  });
+});
+
+test("async local storage with llamaindex", () => {
+  const symbol = Symbol("llm");
+  Settings.withLLM(symbol as never, () => {
+    expect(Settings.llm).toBe(symbol);
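+    // the llamaindex re-export and @llamaindex/core read from the same async-local store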
+    expect(RootSettings.llm).toBe(symbol);
+  });
+});
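
The tests assert the override inside the callback; a complementary assertion (a sketch reusing the fixtures above, not part of this patch) is that the global comes back once `withLLM` returns, which `AsyncLocalStorage.run` guarantees for a synchronous callback:

```ts
test("llm is restored after withLLM returns", () => {
  const symbol = Symbol("llm");
  Settings.withLLM(symbol as never, () => {
    expect(Settings.llm).toBe(symbol);
  });
  // Outside the scoped callback, the beforeEach default applies again.
  expect(Settings.llm).toBe(defaultLLM);
});
```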