diff --git a/apps/next/src/content/docs/llamaindex/examples/local_llm.mdx b/apps/next/src/content/docs/llamaindex/examples/local_llm.mdx
index 031019fabe54fdb4d0548ddd7c5030cfc17465c0..6faae94cb2a2e3936282d1b3c7c407e97513898d 100644
--- a/apps/next/src/content/docs/llamaindex/examples/local_llm.mdx
+++ b/apps/next/src/content/docs/llamaindex/examples/local_llm.mdx
@@ -2,6 +2,8 @@
 title: Local LLMs
 ---
 
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
 LlamaIndex.TS supports OpenAI and [other remote LLM APIs](other_llms). You can also run a local LLM on your machine!
 
 ## Using a local model via Ollama
@@ -24,7 +26,23 @@ The first time you run it will also automatically download and install the model
 
 ### Switch the LLM in your code
 
-To tell LlamaIndex to use a local LLM, use the `Settings` object:
+To switch the LLM in your code, first install the package for the Ollama model provider:
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install @llamaindex/ollama
+	```
+
+	```shell tab="yarn"
+	yarn add @llamaindex/ollama
+	```
+
+	```shell tab="pnpm"
+	pnpm add @llamaindex/ollama
+	```
+</Tabs>
+
+Then, to tell LlamaIndex to use a local LLM, use the `Settings` object:
 
 ```javascript
 Settings.llm = new Ollama({
@@ -34,7 +52,25 @@ Settings.llm = new Ollama({
 
 ### Use local embeddings
 
-If you're doing retrieval-augmented generation, LlamaIndex.TS will also call out to OpenAI to index and embed your data. To be entirely local, you can use a local embedding model like this:
+If you're doing retrieval-augmented generation, LlamaIndex.TS will also call out to OpenAI to index and embed your data. To be entirely local, you can use a local embedding model from Hugging Face.
+
+First, install the Hugging Face model provider package:
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install @llamaindex/huggingface
+	```
+
+	```shell tab="yarn"
+	yarn add @llamaindex/huggingface
+	```
+
+	```shell tab="pnpm"
+	pnpm add @llamaindex/huggingface
+	```
+</Tabs>
+
+And then set the embedding model in your code:
 
 ```javascript
 Settings.embedModel = new HuggingFaceEmbedding({
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/index.mdx b/apps/next/src/content/docs/llamaindex/getting_started/index.mdx
index d9b58049d6d430a07e7ed1df8773c84f4fb94d6e..27cf410524b6ddb3e2cbe94dd01873dd7145d48c 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/index.mdx
@@ -1,6 +1,6 @@
 ---
 title: Installation
-description: Install llamaindex by running a single command.
+description: How to install llamaindex packages.
 ---
 
 import { Tab, Tabs } from "fumadocs-ui/components/tabs";
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/cloudflare.mdx b/apps/next/src/content/docs/llamaindex/getting_started/setup/cloudflare.mdx
index 895ff0838b5b038b3aea51479436e71601581e2f..1d696ac0e28d954eeee66f35aa9790f8e2317999 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/setup/cloudflare.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/setup/cloudflare.mdx
@@ -70,10 +70,8 @@ In Cloudflare Worker and similar serverless JS environment, you need to be aware
 
 - Some Node.js modules are not available in Cloudflare Worker, such as `node:fs`, `node:child_process`, `node:cluster`...
 - You are recommended to design your code around network requests, for example using the `fetch` API to communicate with your database, instead of a long-running process in Node.js.
-- Some of LlamaIndex.TS modules are not available in Cloudflare Worker, for example `SimpleDirectoryReader` (requires `node:fs`), Some multimodal API that relies on [`onnxruntime-node`](https://www.npmjs.com/package/onnxruntime-node)(we might port to HTTP based module in the future).
-- `@llamaindex/core` is designed to work in all JavaScript environment, including Cloudflare Worker. If you find any issue, please report to us.
+- Some LlamaIndex.TS packages are not available in Cloudflare Worker, for example `@llamaindex/readers` and `@llamaindex/huggingface`.
+- The main `llamaindex` package is designed to work in all JavaScript environments, including Cloudflare Worker (see the minimal Worker sketch after this list). If you find any issue, please report it to us.
 - `@llamaindex/env` is a JS environment binding module that polyfills some Node.js/modern Web APIs (for example, a memory-based `fs` module and a Crypto API polyfill). It is designed to work in all JavaScript environments, including Cloudflare Worker.
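+
+For example, a minimal Worker sketch (assuming `OPENAI_API_KEY` is configured as a Worker secret; the `Env` type and prompt are illustrative):
+
+```typescript
+import { Settings } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+
+type Env = { OPENAI_API_KEY: string };
+
+export default {
+  async fetch(request: Request, env: Env): Promise<Response> {
+    // Configure the LLM per request from the Worker secret
+    Settings.llm = new OpenAI({ apiKey: env.OPENAI_API_KEY });
+    const { text } = await Settings.llm.complete({ prompt: "Hello from a Worker!" });
+    return new Response(text);
+  },
+};
+```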
 
-## Known issues
 
-- `llamaindex` not work perfectly in Cloudflare Worker, bundle size will be larger than 1MB, which is the limit of Cloudflare Worker. You will need import submodule instead of the whole `llamaindex` module.
diff --git a/apps/next/src/content/docs/llamaindex/index.mdx b/apps/next/src/content/docs/llamaindex/index.mdx
index b673ccb392b2a4480bfa6d8bd33d5ca9d8f98618..2ef5f4e88bb8854cd797a6272876fd8443bca08c 100644
--- a/apps/next/src/content/docs/llamaindex/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/index.mdx
@@ -20,5 +20,5 @@ LlamaIndex.TS provides tools for beginners, advanced users, and everyone in betw
   className="w-full h-[440px]"
   aria-label="LlamaIndex.TS Starter"
   aria-description="This is a starter example for LlamaIndex.TS, it shows the basic usage of the library."
-  src="https://stackblitz.com/github/run-llama/LlamaIndexTS/tree/main/examples?file=starter.ts"
+  src="https://stackblitz.com/github/run-llama/LlamaIndexTS/tree/main/examples?embed=1&file=starter.ts"
 />
diff --git a/apps/next/src/content/docs/llamaindex/meta.json b/apps/next/src/content/docs/llamaindex/meta.json
index 1752c80fdbabd8e983451df722f1026d6c4acd33..60e633950e9368836b916acfcb7718e5c182e26b 100644
--- a/apps/next/src/content/docs/llamaindex/meta.json
+++ b/apps/next/src/content/docs/llamaindex/meta.json
@@ -7,6 +7,7 @@
     "what-is-llamaindex",
     "index",
     "getting_started",
+    "migration",
     "guide",
     "examples",
     "modules",
diff --git a/apps/next/src/content/docs/llamaindex/migration/0.8-to-0.9.mdx b/apps/next/src/content/docs/llamaindex/migration/0.8-to-0.9.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..09c42b4700bca276c30fdf9319fb99f9c5caf009
--- /dev/null
+++ b/apps/next/src/content/docs/llamaindex/migration/0.8-to-0.9.mdx
@@ -0,0 +1,97 @@
+---
+title: Migrating from v0.8 to v0.9
+---
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+Version 0.9 of LlamaIndex.TS introduces significant architectural changes to improve package size and runtime compatibility. The main goals of this release are:
+
+1. Reduce the package size of the main `llamaindex` package by moving dependencies into provider packages, making it more suitable for serverless environments
+2. Enable consistent code across different environments by using unified imports (no separate imports for Node.js and Edge runtimes)
+
+## Major Changes
+
+### Installing Provider Packages
+
+In v0.9, you need to explicitly install the provider packages you want to use. The main `llamaindex` package no longer includes these dependencies by default.
+
+### Updating Imports
+
+You'll need to update your imports to get classes directly from their respective provider packages. Here's how to migrate different components:
+
+### 1. AI Model Providers
+
+Previously:
+```typescript
+import { OpenAI } from "llamaindex";
+```
+
+Now:
+```typescript
+import { OpenAI } from "@llamaindex/openai";
+```
+
+> Note: This example requires installing the `@llamaindex/openai` package:
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add @llamaindex/openai
+	```
+</Tabs>
+
+For more details on available AI model providers and their configuration, see the [LLMs documentation](/docs/llamaindex/modules/llms) and the [Embedding Models documentation](/docs/llamaindex/modules/embeddings).
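+
+Putting it together, here is a minimal sketch of the migrated import in use (the model name here is illustrative, not a default):
+
+```typescript
+import { Settings } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+
+// Configure the global LLM via the provider package
+Settings.llm = new OpenAI({ model: "gpt-4o-mini", temperature: 0 });
+```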
+
+### 2. Storage Providers
+
+Previously:
+```typescript
+import { PineconeVectorStore } from "llamaindex";
+```
+
+Now:
+```typescript
+import { PineconeVectorStore } from "@llamaindex/pinecone";
+```
+
+For more information about available storage options, refer to the [Data Stores documentation](/docs/llamaindex/modules/data_stores).
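+
+As a usage sketch (the `indexName` value is a placeholder, and we assume Pinecone credentials in your environment):
+
+```typescript
+import { VectorStoreIndex } from "llamaindex";
+import { PineconeVectorStore } from "@llamaindex/pinecone";
+
+// Attach an index to the externally hosted vector store
+const vectorStore = new PineconeVectorStore({ indexName: "my-index" });
+const index = await VectorStoreIndex.fromVectorStore(vectorStore);
+```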
+
+### 3. Data Loaders
+
+Previously:
+```typescript
+import { SimpleDirectoryReader } from "llamaindex";
+```
+
+Now:
+```typescript
+import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
+```
+
+For more details about available data loaders and their usage, check the [Loading Data](/docs/llamaindex/guide/loading) guide.
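+
+The usage itself is unchanged; a minimal sketch (the `./data` path is a placeholder):
+
+```typescript
+import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
+
+// Read every supported file in the folder into Document objects
+const reader = new SimpleDirectoryReader();
+const documents = await reader.loadData("./data");
+```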
+
+### 4. Prefer using `llamaindex` instead of `@llamaindex/core`
+
+`llamaindex` now re-exports most of `@llamaindex/core`. To simplify imports, just use `import { ... } from "llamaindex"` instead of `import { ... } from "@llamaindex/core"`. This is practical because `llamaindex` is now a much smaller package.
+
+We might change the internal imports of `@llamaindex/core` in the future. Let us know if you're missing something.
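+
+For example, `Document` is re-exported by the main package, so this import works without touching `@llamaindex/core`:
+
+```typescript
+import { Document } from "llamaindex";
+
+const document = new Document({ text: "Hello, world!" });
+```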
+
+## Benefits of the Changes
+
+- **Smaller Bundle Size**: By moving dependencies to separate packages, your application only includes the features you actually use
+- **Runtime Consistency**: The same code works across different environments without environment-specific imports
+- **Improved Serverless Support**: Reduced package size makes it easier to deploy to serverless environments with size limitations
+
+## Need Help?
+
+If you encounter any issues during migration, please:
+1. Check our [GitHub repository](https://github.com/run-llama/LlamaIndexTS) for the latest updates
+2. Join our [Discord community](https://discord.gg/dGcwcsnxhU) for support
+3. Open an issue on GitHub if you find a bug or have a feature request
diff --git a/apps/next/src/content/docs/llamaindex/migration/meta.json b/apps/next/src/content/docs/llamaindex/migration/meta.json
new file mode 100644
index 0000000000000000000000000000000000000000..7be5abb90f4a1309253b39861708a0535868362e
--- /dev/null
+++ b/apps/next/src/content/docs/llamaindex/migration/meta.json
@@ -0,0 +1,5 @@
+{
+  "title": "Migration",
+  "description": "Migration between different versions",
+  "pages": ["0.8-to-0.9"]
+}
diff --git a/examples/package.json b/examples/package.json
index ef020349d34ab14f9c4072de2e2c87ac2ec7d18a..c8c66ad82242273b47f7a8567e5a226c5523d8bd 100644
--- a/examples/package.json
+++ b/examples/package.json
@@ -11,36 +11,36 @@
     "@azure/cosmos": "^4.1.1",
     "@azure/identity": "^4.4.1",
     "@azure/search-documents": "^12.1.0",
-    "@llamaindex/anthropic": "workspace:* || ^0.0.33",
-    "@llamaindex/astra": "workspace:* || ^0.0.4",
-    "@llamaindex/azure": "workspace:* || ^0.0.4",
-    "@llamaindex/chroma": "workspace:* || ^0.0.4",
-    "@llamaindex/clip": "workspace:* || ^0.0.35",
-    "@llamaindex/cloud": "workspace:* || ^2.0.24",
-    "@llamaindex/cohere": "workspace:* || ^0.0.4",
-    "@llamaindex/deepinfra": "workspace:* || ^0.0.35",
-    "@llamaindex/env": "workspace:* || ^0.1.27",
-    "@llamaindex/google": "workspace:* || ^0.0.6",
-    "@llamaindex/groq": "workspace:* || ^0.0.50",
-    "@llamaindex/huggingface": "workspace:* || ^0.0.35",
-    "@llamaindex/milvus": "workspace:* || ^0.0.4",
-    "@llamaindex/mistral": "workspace:* || ^0.0.4",
-    "@llamaindex/mixedbread": "workspace:* || ^0.0.4",
-    "@llamaindex/mongodb": "workspace:* || ^0.0.4",
-    "@llamaindex/node-parser": "workspace:* || ^0.0.24",
-    "@llamaindex/ollama": "workspace:* || ^0.0.39",
-    "@llamaindex/openai": "workspace:* || ^0.1.51",
-    "@llamaindex/pinecone": "workspace:* || ^0.0.4",
-    "@llamaindex/portkey-ai": "workspace:* || ^0.0.32",
-    "@llamaindex/postgres": "workspace:* || ^0.0.32",
-    "@llamaindex/qdrant": "workspace:* || ^0.0.4",
-    "@llamaindex/readers": "workspace:* || ^1.0.25",
-    "@llamaindex/replicate": "workspace:* || ^0.0.32",
-    "@llamaindex/upstash": "workspace:* || ^0.0.4",
-    "@llamaindex/vercel": "workspace:* || ^0.0.10",
-    "@llamaindex/vllm": "workspace:* || ^0.0.21",
-    "@llamaindex/weaviate": "workspace:* || ^0.0.4",
-    "@llamaindex/workflow": "workspace:* || ^0.0.10",
+    "@llamaindex/anthropic": "^0.0.33",
+    "@llamaindex/astra": "^0.0.4",
+    "@llamaindex/azure": "^0.0.4",
+    "@llamaindex/chroma": "^0.0.4",
+    "@llamaindex/clip": "^0.0.35",
+    "@llamaindex/cloud": "^2.0.24",
+    "@llamaindex/cohere": "^0.0.4",
+    "@llamaindex/deepinfra": "^0.0.35",
+    "@llamaindex/env": "^0.1.27",
+    "@llamaindex/google": "^0.0.6",
+    "@llamaindex/groq": "^0.0.50",
+    "@llamaindex/huggingface": "^0.0.35",
+    "@llamaindex/milvus": "^0.0.4",
+    "@llamaindex/mistral": "^0.0.4",
+    "@llamaindex/mixedbread": "^0.0.4",
+    "@llamaindex/mongodb": "^0.0.4",
+    "@llamaindex/node-parser": "^0.0.24",
+    "@llamaindex/ollama": "^0.0.39",
+    "@llamaindex/openai": "^0.1.51",
+    "@llamaindex/pinecone": "^0.0.4",
+    "@llamaindex/portkey-ai": "^0.0.32",
+    "@llamaindex/postgres": "^0.0.32",
+    "@llamaindex/qdrant": "^0.0.4",
+    "@llamaindex/readers": "^1.0.25",
+    "@llamaindex/replicate": "^0.0.32",
+    "@llamaindex/upstash": "^0.0.4",
+    "@llamaindex/vercel": "^0.0.10",
+    "@llamaindex/vllm": "^0.0.21",
+    "@llamaindex/weaviate": "^0.0.4",
+    "@llamaindex/workflow": "^0.0.10",
     "@notionhq/client": "^2.2.15",
     "@pinecone-database/pinecone": "^4.0.0",
     "@vercel/postgres": "^0.10.0",
@@ -49,7 +49,7 @@
     "commander": "^12.1.0",
     "dotenv": "^16.4.5",
     "js-tiktoken": "^1.0.14",
-    "llamaindex": "workspace:* || ^0.8.37",
+    "llamaindex": "^0.8.37",
     "mongodb": "6.7.0",
     "postgres": "^3.4.4",
     "wikipedia": "^2.1.2"
diff --git a/packages/llamaindex/package.json b/packages/llamaindex/package.json
index 26533e318eac529d2383251b3bdaa6e168b4f080..d99de84de8d6d747704edf5e6c3c6c47580da30a 100644
--- a/packages/llamaindex/package.json
+++ b/packages/llamaindex/package.json
@@ -77,29 +77,10 @@
         "default": "./dist/cjs/next.js"
       }
     },
-    "./register": "./register.js",
     "./internal/*": {
       "import": "./dist/not-allow.js",
       "require": "./dist/cjs/not-allow.js"
     },
-    "./readers/SimpleDirectoryReader": {
-      "workerd": {
-        "types": "./dist/type/readers/SimpleDirectoryReader.edge.d.ts",
-        "default": "./dist/readers/SimpleDirectoryReader.edge.js"
-      },
-      "edge-light": {
-        "types": "./dist/type/readers/SimpleDirectoryReader.edge.d.ts",
-        "default": "./dist/readers/SimpleDirectoryReader.edge.js"
-      },
-      "import": {
-        "types": "./dist/type/readers/SimpleDirectoryReader.d.ts",
-        "default": "./dist/readers/SimpleDirectoryReader.js"
-      },
-      "require": {
-        "types": "./dist/type/readers/SimpleDirectoryReader.d.ts",
-        "default": "./dist/cjs/readers/SimpleDirectoryReader.js"
-      }
-    },
     "./*": {
       "import": {
         "types": "./dist/type/*.d.ts",
@@ -112,7 +93,6 @@
     }
   },
   "files": [
-    "./register.js",
     "dist",
     "CHANGELOG.md",
     "examples",
diff --git a/packages/llamaindex/register.js b/packages/llamaindex/register.js
deleted file mode 100644
index 16d8488e2bc62d797da191c3c29c6a8207715ddb..0000000000000000000000000000000000000000
--- a/packages/llamaindex/register.js
+++ /dev/null
@@ -1,6 +0,0 @@
-/**
- * ```shell
- * node --import llamaindex/register ./loader.js
- * ```
- */
-import "@llamaindex/readers/node";
diff --git a/packages/providers/anthropic/package.json b/packages/providers/anthropic/package.json
index 744e354973237f920a6d30ee48cc2bc1393b77b4..18150134a1b7e2017b3190027c57154668075d7a 100644
--- a/packages/providers/anthropic/package.json
+++ b/packages/providers/anthropic/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@llamaindex/anthropic",
   "description": "Anthropic Adapter for LlamaIndex",
-  "version": "0.0.32",
+  "version": "0.0.33",
   "type": "module",
   "main": "./dist/index.cjs",
   "module": "./dist/index.js",
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 65c36f865f18ba2ecda968e621943f6aa6b07d30..f15cf71c42a77e7282b50a90e9751f89f2d130be 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -572,94 +572,94 @@ importers:
         specifier: ^12.1.0
         version: 12.1.0
       '@llamaindex/anthropic':
-        specifier: workspace:* || ^0.0.33
+        specifier: ^0.0.33
         version: link:../packages/providers/anthropic
       '@llamaindex/astra':
-        specifier: workspace:* || ^0.0.4
+        specifier: ^0.0.4
         version: link:../packages/providers/storage/astra
       '@llamaindex/azure':
-        specifier: workspace:* || ^0.0.4
+        specifier: ^0.0.4
         version: link:../packages/providers/storage/azure
       '@llamaindex/chroma':
-        specifier: workspace:* || ^0.0.4
+        specifier: ^0.0.4
         version: link:../packages/providers/storage/chroma
       '@llamaindex/clip':
-        specifier: workspace:* || ^0.0.35
+        specifier: ^0.0.35
         version: link:../packages/providers/clip
       '@llamaindex/cloud':
-        specifier: workspace:* || ^2.0.24
+        specifier: ^2.0.24
         version: link:../packages/cloud
       '@llamaindex/cohere':
-        specifier: workspace:* || ^0.0.4
+        specifier: ^0.0.4
         version: link:../packages/providers/cohere
       '@llamaindex/deepinfra':
-        specifier: workspace:* || ^0.0.35
+        specifier: ^0.0.35
         version: link:../packages/providers/deepinfra
       '@llamaindex/env':
-        specifier: workspace:* || ^0.1.27
+        specifier: ^0.1.27
         version: link:../packages/env
       '@llamaindex/google':
-        specifier: workspace:* || ^0.0.6
+        specifier: ^0.0.6
         version: link:../packages/providers/google
       '@llamaindex/groq':
-        specifier: workspace:* || ^0.0.50
+        specifier: ^0.0.50
         version: link:../packages/providers/groq
       '@llamaindex/huggingface':
-        specifier: workspace:* || ^0.0.35
+        specifier: ^0.0.35
         version: link:../packages/providers/huggingface
       '@llamaindex/milvus':
-        specifier: workspace:* || ^0.0.4
+        specifier: ^0.0.4
         version: link:../packages/providers/storage/milvus
       '@llamaindex/mistral':
-        specifier: workspace:* || ^0.0.4
+        specifier: ^0.0.4
         version: link:../packages/providers/mistral
       '@llamaindex/mixedbread':
-        specifier: workspace:* || ^0.0.4
+        specifier: ^0.0.4
         version: link:../packages/providers/mixedbread
       '@llamaindex/mongodb':
-        specifier: workspace:* || ^0.0.4
+        specifier: ^0.0.4
         version: link:../packages/providers/storage/mongodb
       '@llamaindex/node-parser':
-        specifier: workspace:* || ^0.0.24
+        specifier: ^0.0.24
         version: link:../packages/node-parser
       '@llamaindex/ollama':
-        specifier: workspace:* || ^0.0.39
+        specifier: ^0.0.39
         version: link:../packages/providers/ollama
       '@llamaindex/openai':
-        specifier: workspace:* || ^0.1.51
+        specifier: ^0.1.51
         version: link:../packages/providers/openai
       '@llamaindex/pinecone':
-        specifier: workspace:* || ^0.0.4
+        specifier: ^0.0.4
         version: link:../packages/providers/storage/pinecone
       '@llamaindex/portkey-ai':
-        specifier: workspace:* || ^0.0.32
+        specifier: ^0.0.32
         version: link:../packages/providers/portkey-ai
       '@llamaindex/postgres':
-        specifier: workspace:* || ^0.0.32
+        specifier: ^0.0.32
         version: link:../packages/providers/storage/postgres
       '@llamaindex/qdrant':
-        specifier: workspace:* || ^0.0.4
+        specifier: ^0.0.4
         version: link:../packages/providers/storage/qdrant
       '@llamaindex/readers':
-        specifier: workspace:* || ^1.0.25
+        specifier: ^1.0.25
         version: link:../packages/readers
       '@llamaindex/replicate':
-        specifier: workspace:* || ^0.0.32
+        specifier: ^0.0.32
         version: link:../packages/providers/replicate
       '@llamaindex/upstash':
-        specifier: workspace:* || ^0.0.4
+        specifier: ^0.0.4
         version: link:../packages/providers/storage/upstash
       '@llamaindex/vercel':
-        specifier: workspace:* || ^0.0.10
+        specifier: ^0.0.10
         version: link:../packages/providers/vercel
       '@llamaindex/vllm':
-        specifier: workspace:* || ^0.0.21
+        specifier: ^0.0.21
         version: link:../packages/providers/vllm
       '@llamaindex/weaviate':
-        specifier: workspace:* || ^0.0.4
+        specifier: ^0.0.4
         version: link:../packages/providers/storage/weaviate
       '@llamaindex/workflow':
-        specifier: workspace:* || ^0.0.10
+        specifier: ^0.0.10
         version: link:../packages/workflow
       '@notionhq/client':
         specifier: ^2.2.15
@@ -686,7 +686,7 @@ importers:
         specifier: ^1.0.14
         version: 1.0.18
       llamaindex:
-        specifier: workspace:* || ^0.8.37
+        specifier: ^0.8.37
         version: link:../packages/llamaindex
       mongodb:
         specifier: 6.7.0