From 6d4d96f8feb530e2a5165c1cb3fc2a055a01d596 Mon Sep 17 00:00:00 2001
From: Thuc Pham <51660321+thucpn@users.noreply.github.com>
Date: Mon, 10 Feb 2025 15:43:29 +0700
Subject: [PATCH] chore: update import to workspace pages for examples and docs
 (#1626)

Co-authored-by: Marcus Schiesser <mail@marcusschiesser.de>
---
 .changeset/flat-mirrors-dream.md              |   5 +
 apps/next/src/app/(home)/page.tsx             |  14 +-
 .../docs/llamaindex/examples/agent_gemini.mdx |  20 +++
 .../examples/context_aware_agent.mdx          |   3 +-
 .../docs/llamaindex/examples/other_llms.mdx   |  29 +++-
 .../docs/llamaindex/getting_started/index.mdx |  21 +++
 .../getting_started/setup/typescript.mdx      |   4 +-
 .../guide/agents/2_create_agent.mdx           |   3 +-
 .../llamaindex/guide/agents/4_agentic_rag.mdx |  34 +++--
 .../docs/llamaindex/guide/loading/index.mdx   |  12 +-
 .../llamaindex/guide/loading/node-parser.mdx  |   9 +-
 .../docs/llamaindex/integration/vercel.mdx    |   2 +-
 .../llamaindex/modules/data_loaders/json.mdx  |  20 ++-
 .../data_loaders/llama_parse/images.mdx       |  30 +++--
 .../data_loaders/llama_parse/json_mode.mdx    |  23 +++-
 .../modules/data_stores/doc_stores/index.mdx  |  32 +++++
 .../llamaindex/modules/data_stores/index.mdx  |  33 +----
 .../data_stores/index_stores/index.mdx        |  32 +++++
 .../data_stores/vector_stores/qdrant.mdx      |  24 +++-
 .../metadata_extraction.mdx                   |   9 +-
 .../available_embeddings/deepinfra.mdx        |  33 +++--
 .../available_embeddings/gemini.mdx           |  25 +++-
 .../available_embeddings/huggingface.mdx      |  25 +++-
 .../available_embeddings/mistral.mdx          |  23 +++-
 .../available_embeddings/mixedbreadai.mdx     |  22 ++-
 .../available_embeddings/ollama.mdx           |  23 +++-
 .../available_embeddings/openai.mdx           |  23 +++-
 .../llamaindex/modules/embeddings/index.mdx   |  21 ++-
 .../evaluation/modules/correctness.mdx        |  21 ++-
 .../evaluation/modules/faithfulness.mdx       |  23 +++-
 .../modules/evaluation/modules/relevancy.mdx  |  23 +++-
 .../modules/ingestion_pipeline/index.mdx      |  25 +++-
 .../modules/llms/available_llms/anthropic.mdx |  24 +++-
 .../modules/llms/available_llms/azure.mdx     |  24 +++-
 .../modules/llms/available_llms/bedrock.mdx   |  18 +++
 .../modules/llms/available_llms/deepinfra.mdx |  26 +++-
 .../modules/llms/available_llms/gemini.mdx    |  34 +++--
 .../modules/llms/available_llms/groq.mdx      |  23 +++-
 .../modules/llms/available_llms/llama2.mdx    |  38 ++++--
 .../modules/llms/available_llms/mistral.mdx   |  26 +++-
 .../modules/llms/available_llms/ollama.mdx    |  27 +++-
 .../modules/llms/available_llms/openai.mdx    |  27 +++-
 .../modules/llms/available_llms/portkey.mdx   |  27 +++-
 .../modules/llms/available_llms/together.mdx  |  25 +++-
 .../docs/llamaindex/modules/llms/index.mdx    |  23 +++-
 .../docs/llamaindex/modules/node_parser.mdx   |   4 +-
 .../node_postprocessors/cohere_reranker.mdx   |  28 ++--
 .../modules/node_postprocessors/index.mdx     |  34 +++--
 .../node_postprocessors/jinaai_reranker.mdx   |  28 ++--
 .../mixedbreadiai_reranker.mdx                |  23 +++-
 .../query_engines/metadata_filtering.mdx      |  34 ++---
 .../query_engines/router_query_engine.mdx     |  24 +++-
 .../modules/response_synthesizer.mdx          |   2 +-
 .../docs/llamaindex/modules/workflows.mdx     |   2 +-
 e2e/examples/llama-parse-browser/src/main.ts  |   2 +-
 examples/Settings.ts                          |   3 +-
 examples/agent/large_toolcall.ts              |   3 +-
 examples/agent/large_toolcall_with_gpt4o.ts   |   3 +-
 examples/agent/multi_document_agent.ts        |   3 +-
 examples/agent/openai-task.ts                 |   3 +-
 examples/agent/openai.ts                      |   3 +-
 examples/agent/query_openai_agent.ts          |   2 +-
 examples/agent/react_agent.ts                 |   3 +-
 examples/agent/retriever_openai_agent.ts      |   2 +-
 examples/agent/step_wise_query_tool.ts        |   2 +-
 examples/agent/step_wise_react.ts             |   3 +-
 examples/agent/stream_openai_agent.ts         |   3 +-
 examples/agent/wiki.ts                        |   2 +-
 examples/anthropic/agent.ts                   |   4 +-
 examples/anthropic/chat.ts                    |   2 +-
 examples/anthropic/chat_interactive.ts        |   3 +-
 examples/anthropic/haiku.ts                   |   2 +-
 examples/anthropic/prompt-caching.ts          |   2 +-
 examples/anthropic/stream.ts                  |   2 +-
 examples/astradb/example.ts                   |   2 +-
 examples/astradb/load.ts                      |   9 +-
 examples/astradb/query.ts                     |   7 +-
 examples/azure-cosmosdb.ts                    |   6 +-
 examples/azure/azure-openai.ts                |   2 +-
 examples/azure/azure_dynamic_session.ts       |   4 +-
 examples/chatHistory.ts                       |   2 +-
 examples/chromadb/preFilters.ts               |   2 +-
 examples/chromadb/test.ts                     |   9 +-
 examples/cosmosdb/loadVectorData.ts           |   5 +-
 examples/cosmosdb/queryVectorData.ts          |   5 +-
 examples/cosmosdb/utils.ts                    |   2 +-
 examples/deepinfra/chat.ts                    |   2 +-
 examples/deepinfra/embedding.ts               |   2 +-
 examples/evaluation/correctness.ts            |   3 +-
 examples/evaluation/faithfulness.ts           |   2 +-
 examples/evaluation/relevancy.ts              |   2 +-
 examples/extractors/keywordExtractor.ts       |   8 +-
 .../extractors/questionsAnsweredExtractor.ts  |   2 +-
 examples/extractors/summaryExtractor.ts       |   8 +-
 examples/extractors/titleExtractor.ts         |   3 +-
 examples/gemini/agent.ts                      |   9 +-
 examples/gemini/chat.ts                       |   2 +-
 examples/gemini/chatVertex.ts                 |   2 +-
 examples/gemini/embedding.ts                  |   2 +-
 examples/gptllama.ts                          |   4 +-
 examples/gptturbollama3.ts                    |   4 +-
 examples/groq.ts                              |  10 +-
 examples/huggingface/chat.ts                  |   2 +-
 examples/huggingface/embedding.ts             |   6 +-
 examples/huggingface/embeddingApi.ts          |   6 +-
 examples/huggingface/local.ts                 |   2 +-
 examples/ingestion/basicIngestion.ts          |   2 +-
 examples/jsonExtract.ts                       |   2 +-
 examples/llama3.ts                            |   2 +-
 examples/llamadeuce.ts                        |   2 +-
 examples/markdown.ts                          |   3 +-
 examples/metadata-filter/milvus.ts            |   3 +-
 examples/milvus/load.ts                       |   9 +-
 examples/milvus/query.ts                      |   3 +-
 examples/mistral.ts                           |   9 +-
 examples/mongo.ts                             |   3 +-
 examples/mongodb/2_load_and_index.ts          |   7 +-
 examples/mongodb/3_query.ts                   |   3 +-
 examples/multimodal/chat.ts                   |   3 +-
 examples/multimodal/clip.ts                   |   3 +-
 examples/multimodal/context.ts                |   4 +-
 examples/multimodal/rag.ts                    |   4 +-
 examples/ollama.ts                            |   2 +-
 examples/openai.ts                            |   2 +-
 examples/openai_o1.ts                         |   2 +-
 examples/package.json                         |  36 +++--
 examples/pinecone-vector-store/load-docs.ts   |   2 +-
 examples/pinecone-vector-store/query.ts       |   3 +-
 examples/portkey.ts                           |   2 +-
 examples/qdrantdb/preFilters.ts               |   2 +-
 examples/readers/package.json                 |   3 +-
 examples/readers/src/assemblyai.ts            |   7 +-
 examples/readers/src/csv.ts                   |   2 +-
 examples/readers/src/llamaparse-docx.ts       |   2 +-
 examples/readers/src/llamaparse-json.ts       |   6 +-
 examples/readers/src/llamaparse.ts            |   3 +-
 examples/readers/src/pdf_fw_openai.ts         |   3 +-
 ...simple-directory-reader-with-llamaparse.ts |   4 +-
 .../readers/src/simple-directory-reader.ts    |   2 +-
 examples/readonly.ts                          |   7 +-
 examples/recipes/cost-analysis.ts             |   4 +-
 examples/rerankers/CohereReranker.ts          |  10 +-
 examples/routerQueryEngine.ts                 |   2 +-
 examples/sentenceWindow.ts                    |   2 +-
 examples/toolsStream.ts                       |   2 +-
 examples/vector-store/azure/index.ts          |  13 +-
 examples/vector-store/pg/load-docs.ts         |   2 +-
 examples/vector-store/pg/supabase.ts          |   3 +-
 examples/vectorIndexAnthropic.ts              |   2 +-
 examples/vectorIndexCustomize.ts              |   2 +-
 examples/vectorIndexEmbed3.ts                 |   8 +-
 examples/vectorIndexFromVectorStore.ts        |   6 +-
 examples/vectorIndexGPT4.ts                   |   3 +-
 examples/vectorIndexLocal.ts                  |  10 +-
 examples/vision.ts                            |   2 +-
 examples/vllm.ts                              |   2 +-
 examples/weaviate/load.ts                     |   9 +-
 examples/weaviate/query.ts                    |   3 +-
 examples/wiki.ts                              |   2 +-
 examples/workflow/app-creator.ts              |   2 +-
 examples/workflow/conditional.ts              |   2 +-
 examples/workflow/joke.ts                     |   2 +-
 examples/workflow/stream-events.ts            |   2 +-
 examples/workflow/validation.ts               |   2 +-
 packages/cloud/package.json                   |  14 ++
 packages/llamaindex/src/index.edge.ts         |   1 +
 pnpm-lock.yaml                                | 126 ++++++++++++------
 167 files changed, 1253 insertions(+), 492 deletions(-)
 create mode 100644 .changeset/flat-mirrors-dream.md

diff --git a/.changeset/flat-mirrors-dream.md b/.changeset/flat-mirrors-dream.md
new file mode 100644
index 000000000..a0cb414cb
--- /dev/null
+++ b/.changeset/flat-mirrors-dream.md
@@ -0,0 +1,5 @@
+---
+"@llamaindex/doc": patch
+---
+
+chore: update examples and docs to use unified imports
diff --git a/apps/next/src/app/(home)/page.tsx b/apps/next/src/app/(home)/page.tsx
index f578a1302..db5ca76b1 100644
--- a/apps/next/src/app/(home)/page.tsx
+++ b/apps/next/src/app/(home)/page.tsx
@@ -76,15 +76,19 @@ export default function HomePage() {
           >
             <MagicMove
               code={[
-                `import { OpenAI } from "llamaindex";
+                `import { OpenAI } from "@llamaindex/openai";
+
 const llm = new OpenAI();
 const response = await llm.complete({ prompt: "How are you?" });`,
-                `import { OpenAI } from "llamaindex";
+                `import { OpenAI } from "@llamaindex/openai";
+
 const llm = new OpenAI();
 const response = await llm.chat({
   messages: [{ content: "Tell me a joke.", role: "user" }],
 });`,
-                `import { OpenAI, ChatMemoryBuffer } from "llamaindex";
+                `import { ChatMemoryBuffer } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+
 const llm = new OpenAI({ model: 'gpt-4o' });
 const buffer = new ChatMemoryBuffer({
   tokenLimit: 128_000,
@@ -94,7 +98,9 @@ const response = await llm.chat({
   messages: buffer.getMessages(),
   stream: true
 });`,
-                `import { OpenAIAgent, ChatMemoryBuffer } from "llamaindex";
+                `import { ChatMemoryBuffer } from "llamaindex";
+import { OpenAIAgent } from "@llamaindex/openai";
+
 const agent = new OpenAIAgent({
   llm,
   tools: [...myTools]
diff --git a/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx b/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx
index 38bc4ef72..e9caf43ac 100644
--- a/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx
+++ b/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx
@@ -5,4 +5,24 @@ title: Gemini Agent
 import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
 import CodeSourceGemini from "!raw-loader!../../../../../../../examples/gemini/agent.ts";
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/google
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/google
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/google
+	```
+</Tabs>
+
+## Source
+
 <DynamicCodeBlock lang="ts" code={CodeSourceGemini} />
diff --git a/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx b/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx
index 6aa5c5a4f..6295c5f3c 100644
--- a/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx
+++ b/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx
@@ -12,9 +12,8 @@ Here's a simple example of how to use the Context-Aware Agent:
 import {
   Document,
   VectorStoreIndex,
-  OpenAIContextAwareAgent,
-  OpenAI,
 } from "llamaindex";
+import { OpenAI, OpenAIContextAwareAgent } from "@llamaindex/openai";
 
 async function createContextAwareAgent() {
   // Create and index some documents
diff --git a/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx b/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx
index 5fdc6bbaa..1adb04ff1 100644
--- a/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx
+++ b/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx
@@ -7,14 +7,36 @@ import CodeSource from "!raw-loader!../../../../../../../examples/mistral";
 
 By default LlamaIndex.TS uses OpenAI's LLMs and embedding models, but we support [lots of other LLMs](../modules/llms) including models from Mistral (Mistral, Mixtral), Anthropic (Claude) and Google (Gemini).
 
-If you don't want to use an API at all you can [run a local model](../../examples/local_llm)
+If you don't want to use an API at all you can [run a local model](../../examples/local_llm).
+
+This example walks you through the process of setting up a Mistral model:
+
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/mistral
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/mistral
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/mistral
+	```
+</Tabs>
 
 ## Using another LLM
 
 You can specify what LLM LlamaIndex.TS will use on the `Settings` object, like this:
 
 ```typescript
-import { MistralAI, Settings } from "llamaindex";
+import { MistralAI } from "@llamaindex/mistral";
+import { Settings } from "llamaindex";
 
 Settings.llm = new MistralAI({
   model: "mistral-tiny",
@@ -29,7 +51,8 @@ You can see examples of other APIs we support by checking out "Available LLMs" i
 A frequent gotcha when trying to use a different API as your LLM is that LlamaIndex will also by default index and embed your data using OpenAI's embeddings. To completely switch away from OpenAI you will need to set your embedding model as well, for example:
 
 ```typescript
-import { MistralAIEmbedding, Settings } from "llamaindex";
+import { MistralAIEmbedding } from "@llamaindex/mistral";
+import { Settings } from "llamaindex";
 
 Settings.embedModel = new MistralAIEmbedding();
 ```
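+
+Putting both together, a fully OpenAI-free setup could look like this. A minimal sketch, reusing the model name from above as a placeholder:
+
+```typescript
+import { MistralAI, MistralAIEmbedding } from "@llamaindex/mistral";
+import { Settings } from "llamaindex";
+
+// use Mistral for both text generation and embeddings
+Settings.llm = new MistralAI({ model: "mistral-tiny" });
+Settings.embedModel = new MistralAIEmbedding();
+```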
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/index.mdx b/apps/next/src/content/docs/llamaindex/getting_started/index.mdx
index 01e71a398..d9b58049d 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/index.mdx
@@ -5,6 +5,8 @@ description: Install llamaindex by running a single command.
 
 import { Tab, Tabs } from "fumadocs-ui/components/tabs";
 
+To install llamaindex, run the following command:
+
 <Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
 	```shell tab="npm"
 	npm install llamaindex
@@ -19,6 +21,25 @@ import { Tab, Tabs } from "fumadocs-ui/components/tabs";
 	```
 </Tabs>
 
+In most cases, you'll also need an LLM package to use LlamaIndex. For example, to use the OpenAI LLM, you would install the following:
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add @llamaindex/openai
+	```
+</Tabs>
+
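+Once installed, you can set the LLM as the default via `Settings`. A minimal sketch (the model name is just an example):
+
+```typescript
+import { Settings } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+
+// use OpenAI as the default LLM for indexes, query engines and agents
+Settings.llm = new OpenAI({ model: "gpt-4o-mini" });
+```
+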
+Go to [Using other LLM APIs](/docs/llamaindex/examples/other_llms) to find out how to use other LLMs.
+
 ## What's next?
 
 <Cards>
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx b/apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx
index b25e5ee54..c9d92f3cb 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx
@@ -9,7 +9,7 @@ LlamaIndex.TS is written in TypeScript and designed to be used in TypeScript pro
 We do lots of work on strong typing to make sure you have a great typing experience with LlamaIndex.TS.
 
 ```ts twoslash
-import { PromptTemplate } from '@llamaindex/core/prompts'
+import { PromptTemplate } from 'llamaindex'
 const promptTemplate = new PromptTemplate({
   template: `Context information from multiple sources is below.
 ---------------------
@@ -29,7 +29,7 @@ promptTemplate.format({
 ```
 
 ```ts twoslash
-import { FunctionTool } from '@llamaindex/core/tools'
+import { FunctionTool } from 'llamaindex'
 import { z } from 'zod'
 
 // ---cut-before---
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx
index 860af4573..37fc24b20 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx
+++ b/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx
@@ -31,7 +31,8 @@ First we'll need to pull in our dependencies. These are:
 - Dotenv to load our API key from the .env file
 
 ```javascript
-import { OpenAI, FunctionTool, OpenAIAgent, Settings } from "llamaindex";
+import { FunctionTool, Settings } from "llamaindex";
+import { OpenAI, OpenAIAgent } from "@llamaindex/openai";
 import "dotenv/config";
 ```
 
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx
index 465f38299..b690120fb 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx
+++ b/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx
@@ -13,22 +13,34 @@ To learn more about RAG, we recommend this [introduction](https://docs.llamainde
 
 We're going to start with the same agent we [built in step 1](https://github.com/run-llama/ts-agents/blob/main/1_agent/agent.ts), but make a few changes. You can find the finished version [in the repository](https://github.com/run-llama/ts-agents/blob/main/2_agentic_rag/agent.ts).
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai @llamaindex/huggingface
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai @llamaindex/huggingface
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai @llamaindex/huggingface
+	```
+</Tabs>
+
 ### New dependencies
 
 We'll be bringing in `SimpleDirectoryReader`, `HuggingFaceEmbedding`, `VectorStoreIndex`, and `QueryEngineTool` from LlamaIndex.TS, as well as the dependencies we previously used.
 
 ```javascript
-import {
-  OpenAI,
-  FunctionTool,
-  OpenAIAgent,
-  OpenAIContextAwareAgent,
-  Settings,
-  SimpleDirectoryReader,
-  HuggingFaceEmbedding,
-  VectorStoreIndex,
-  QueryEngineTool,
-} from "llamaindex";
+import { FunctionTool, QueryEngineTool, Settings, SimpleDirectoryReader, VectorStoreIndex } from "llamaindex";
+import { OpenAI, OpenAIAgent } from "@llamaindex/openai";
+import { HuggingFaceEmbedding } from "@llamaindex/huggingface";
 ```
 
 ### Add an embedding model
diff --git a/apps/next/src/content/docs/llamaindex/guide/loading/index.mdx b/apps/next/src/content/docs/llamaindex/guide/loading/index.mdx
index 874788ffe..91d91868f 100644
--- a/apps/next/src/content/docs/llamaindex/guide/loading/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/guide/loading/index.mdx
@@ -33,11 +33,11 @@ We offer readers for different file formats.
 
 <Tabs groupId="llamaindex-or-readers" items={["llamaindex", "@llamaindex/readers"]} persist>
 	```ts twoslash tab="llamaindex"
-	import { CSVReader } from 'llamaindex'
-	import { PDFReader } from 'llamaindex'
-	import { JSONReader } from 'llamaindex'
-	import { MarkdownReader } from 'llamaindex'
-	import { HTMLReader } from 'llamaindex'
+	import { CSVReader } from '@llamaindex/readers/csv'
+	import { PDFReader } from '@llamaindex/readers/pdf'
+	import { JSONReader } from '@llamaindex/readers/json'
+	import { MarkdownReader } from '@llamaindex/readers/markdown'
+	import { HTMLReader } from '@llamaindex/readers/html'
 	// you can find more readers in the documentation
 	```
 
@@ -71,7 +71,7 @@ We offer readers for different file formats.
 	```
 
 	```ts twoslash tab="@llamaindex/readers"
-	import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
+	import { SimpleDirectoryReader } from "llamaindex";
 
 	const reader = new SimpleDirectoryReader()
 	const documents = await reader.loadData("./data")
diff --git a/apps/next/src/content/docs/llamaindex/guide/loading/node-parser.mdx b/apps/next/src/content/docs/llamaindex/guide/loading/node-parser.mdx
index 2c070ed8c..51760cd9f 100644
--- a/apps/next/src/content/docs/llamaindex/guide/loading/node-parser.mdx
+++ b/apps/next/src/content/docs/llamaindex/guide/loading/node-parser.mdx
@@ -15,7 +15,7 @@ By default, we will use `Settings.nodeParser` to split the document into nodes.
 
 ```ts twoslash
 import { TextFileReader } from '@llamaindex/readers/text'
-import { SentenceSplitter } from '@llamaindex/core/node-parser';
+import { SentenceSplitter } from 'llamaindex';
 import { Settings } from 'llamaindex';
 
 const nodeParser = new SentenceSplitter();
@@ -28,7 +28,7 @@ Settings.nodeParser = nodeParser;
 The underlying text splitter will split text by sentences. It can also be used as a standalone module for splitting raw text.
 
 ```ts twoslash
-import { SentenceSplitter } from "@llamaindex/core/node-parser";
+import { SentenceSplitter } from "llamaindex";
 
 const splitter = new SentenceSplitter({ chunkSize: 1 });
 
@@ -42,7 +42,7 @@ The `MarkdownNodeParser` is a more advanced `NodeParser` that can handle markdow
 
 <Tabs items={["with reader", "with node:fs"]}>
 	```ts twoslash tab="with reader"
-	import { MarkdownNodeParser } from "@llamaindex/core/node-parser";
+	import { MarkdownNodeParser } from "llamaindex";
 	import { MarkdownReader } from '@llamaindex/readers/markdown'
 
 	const reader = new MarkdownReader();
@@ -56,8 +56,7 @@ The `MarkdownNodeParser` is a more advanced `NodeParser` that can handle markdow
 
 	```ts twoslash tab="with node:fs"
 	import fs from 'node:fs/promises';
-	import { MarkdownNodeParser } from "@llamaindex/core/node-parser";
-	import { Document } from '@llamaindex/core/schema';
+	import { MarkdownNodeParser, Document } from "llamaindex";
 
 	const markdownNodeParser = new MarkdownNodeParser();
 	const text = await fs.readFile('path/to/file.md', 'utf-8');
diff --git a/apps/next/src/content/docs/llamaindex/integration/vercel.mdx b/apps/next/src/content/docs/llamaindex/integration/vercel.mdx
index 7eb946247..0640a4edc 100644
--- a/apps/next/src/content/docs/llamaindex/integration/vercel.mdx
+++ b/apps/next/src/content/docs/llamaindex/integration/vercel.mdx
@@ -69,7 +69,7 @@ streamText({
 For production deployments, you can use LlamaCloud to store and manage your documents:
 
 ```typescript
-import { LlamaCloudIndex } from "llamaindex";
+import { LlamaCloudIndex } from "@llamaindex/cloud";
 
 // Create a LlamaCloud index
 const index = await LlamaCloudIndex.fromDocuments({
diff --git a/apps/next/src/content/docs/llamaindex/modules/data_loaders/json.mdx b/apps/next/src/content/docs/llamaindex/modules/data_loaders/json.mdx
index cf69bf73a..3784b47fd 100644
--- a/apps/next/src/content/docs/llamaindex/modules/data_loaders/json.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/data_loaders/json.mdx
@@ -6,10 +6,28 @@ A simple JSON data loader with various options.
 Either parses the entire string, cleaning it and treating each line as an embedding, or performs a recursive depth-first traversal yielding JSON paths.
 Supports streaming of large JSON data using [@discoveryjs/json-ext](https://github.com/discoveryjs/json-ext).
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/readers
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/readers
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/readers
+	```
+</Tabs>
+
 ## Usage
 
 ```ts
-import { JSONReader } from "llamaindex";
+import { JSONReader } from "@llamaindex/readers/json";
 
 const file = "../../PATH/TO/FILE";
 const content = new TextEncoder().encode("JSON_CONTENT");
diff --git a/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/images.mdx b/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/images.mdx
index b8e099e18..542e63051 100644
--- a/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/images.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/images.mdx
@@ -4,6 +4,24 @@ title: Image Retrieval
 
 LlamaParse `json` mode supports extracting any images found in a page object by using the `getImages` function. They are downloaded to a local folder and can then be sent to a multimodal LLM for further processing.
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/cloud @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/cloud @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/cloud @llamaindex/openai
+	```
+</Tabs>
+
 ## Usage
 
 We pass our array of JSON objects to the `getImages` method, which downloads the images to a specified folder and returns a list of `ImageNode`s.
@@ -19,14 +37,10 @@ const imageDicts = await reader.getImages(jsonObjs, "images");
 You can create an index across both text and image nodes by requesting alternative text for the image from a multimodal LLM.
 
 ```ts
-import {
-  Document,
-  ImageNode,
-  LlamaParseReader,
-  OpenAI,
-  VectorStoreIndex,
-} from "llamaindex";
-import { createMessageContent } from "llamaindex/synthesizers/utils";
+import { Document, ImageNode, VectorStoreIndex } from "llamaindex";
+import { LlamaParseReader } from "@llamaindex/cloud";
+import { OpenAI } from "@llamaindex/openai";
+import { createMessageContent } from "llamaindex";
 
 const reader = new LlamaParseReader();
 async function main() {
diff --git a/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/json_mode.mdx b/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/json_mode.mdx
index 537d5cba4..4777f0bb1 100644
--- a/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/json_mode.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/json_mode.mdx
@@ -4,12 +4,32 @@ title: JSON Mode
 
 In JSON mode, LlamaParse will return a data structure representing the parsed object.
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/cloud
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/cloud
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/cloud
+	```
+</Tabs>
+
 ## Usage
 
 For JSON mode, you need to use `loadJson`. The `resultType` is set automatically with this method.
 More information about indexing the results is on the next page.
 
 ```ts
+import { LlamaParseReader } from "@llamaindex/cloud";
+
 const reader = new LlamaParseReader();
 async function main() {
   // Load the file and return an array of json objects
@@ -59,7 +79,8 @@ All Readers share a `loadData` method with `SimpleDirectoryReader` that promises
 However, a simple workaround is to create a new reader class that extends `LlamaParseReader` and adds a new method or overrides `loadData`, wrapping JSON mode, extracting the required values, and returning a `Document` object.
 
 ```ts
-import { LlamaParseReader, Document } from "llamaindex";
+import { Document } from "llamaindex";
+import { LlamaParseReader } from "@llamaindex/cloud";
 
 class LlamaParseReaderWithJson extends LlamaParseReader {
   // Override the loadData method
diff --git a/apps/next/src/content/docs/llamaindex/modules/data_stores/doc_stores/index.mdx b/apps/next/src/content/docs/llamaindex/modules/data_stores/doc_stores/index.mdx
index 9546efd3f..03b97172d 100644
--- a/apps/next/src/content/docs/llamaindex/modules/data_stores/doc_stores/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/data_stores/doc_stores/index.mdx
@@ -11,6 +11,38 @@ Document stores contain ingested document chunks, i.e. [Node](/docs/llamaindex/m
 
 Check the [LlamaIndexTS Github](https://github.com/run-llama/LlamaIndexTS) for the most up to date overview of integrations.
 
+## Using PostgreSQL as Document Store
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/postgres
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/postgres
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/postgres
+	```
+</Tabs>
+
+You can configure the `schemaName`, `tableName`, `namespace`, and
+`connectionString`. If a `connectionString` is not
+provided, it will use the environment variables `PGHOST`, `PGUSER`,
+`PGPASSWORD`, `PGDATABASE` and `PGPORT`.
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+import { PostgresDocumentStore } from "@llamaindex/postgres";
+
+const storageContext = await storageContextFromDefaults({
+  docStore: new PostgresDocumentStore(),
+});
+```
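+
+Continuing the snippet above, the storage context can then be attached to an index, following the same pattern as the local-storage example on the Storage page:
+
+```typescript
+// index a document using the PostgreSQL-backed document store
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```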
+
 ## API Reference
 
 - [BaseDocumentStore](/docs/api/classes/BaseDocumentStore)
diff --git a/apps/next/src/content/docs/llamaindex/modules/data_stores/index.mdx b/apps/next/src/content/docs/llamaindex/modules/data_stores/index.mdx
index 4282f5cdb..0dbc43ade 100644
--- a/apps/next/src/content/docs/llamaindex/modules/data_stores/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/data_stores/index.mdx
@@ -5,46 +5,23 @@ title: Storage
 Storage in LlamaIndex.TS works automatically once you've configured a
 `StorageContext` object.
 
-## Local Storage
-
-You can configure the `persistDir` and attach it to an index.
-
-```typescript
-import {
-  Document,
-  VectorStoreIndex,
-  storageContextFromDefaults,
-} from "llamaindex";
-
-const storageContext = await storageContextFromDefaults({
-  persistDir: "./storage",
-});
+By default, a local directory is used for storage. Depending on the storage type (i.e. doc stores, index stores, or vector stores), you can configure a different persistence layer.
+Most commonly, a vector database is used as the vector store.
 
-const document = new Document({ text: "Test Text" });
-const index = await VectorStoreIndex.fromDocuments([document], {
-  storageContext,
-});
-```
 
-## PostgreSQL Storage
+## Local Storage
 
-You can configure the `schemaName`, `tableName`, `namespace`, and
-`connectionString`. If a `connectionString` is not
-provided, it will use the environment variables `PGHOST`, `PGUSER`,
-`PGPASSWORD`, `PGDATABASE` and `PGPORT`.
+You can configure the `persistDir` to define where to store the data locally.
 
 ```typescript
 import {
   Document,
   VectorStoreIndex,
-  PostgresDocumentStore,
-  PostgresIndexStore,
   storageContextFromDefaults,
 } from "llamaindex";
 
 const storageContext = await storageContextFromDefaults({
-  docStore: new PostgresDocumentStore(),
-  indexStore: new PostgresIndexStore(),
+  persistDir: "./storage",
 });
 
 const document = new Document({ text: "Test Text" });
diff --git a/apps/next/src/content/docs/llamaindex/modules/data_stores/index_stores/index.mdx b/apps/next/src/content/docs/llamaindex/modules/data_stores/index_stores/index.mdx
index 19b063c28..34effb7fa 100644
--- a/apps/next/src/content/docs/llamaindex/modules/data_stores/index_stores/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/data_stores/index_stores/index.mdx
@@ -11,6 +11,38 @@ Index stores are underlying storage components that contain metadata(i.e. inform
 
 Check the [LlamaIndexTS Github](https://github.com/run-llama/LlamaIndexTS) for the most up to date overview of integrations.
 
+## Using PostgreSQL as Index Store
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/postgres
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/postgres
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/postgres
+	```
+</Tabs>
+
+You can configure the `schemaName`, `tableName`, `namespace`, and
+`connectionString`. If a `connectionString` is not
+provided, it will use the environment variables `PGHOST`, `PGUSER`,
+`PGPASSWORD`, `PGDATABASE` and `PGPORT`.
+
+```typescript
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+import { PostgresIndexStore } from "@llamaindex/postgres";
+
+const storageContext = await storageContextFromDefaults({
+  indexStore: new PostgresIndexStore(),
+});
+```
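+
+As with the document store, you can then build an index on top of this storage context:
+
+```typescript
+// index a document using the PostgreSQL-backed index store
+const document = new Document({ text: "Test Text" });
+const index = await VectorStoreIndex.fromDocuments([document], {
+  storageContext,
+});
+```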
+
 ## API Reference
 
 - [BaseIndexStore](/docs/api/classes/BaseIndexStore)
diff --git a/apps/next/src/content/docs/llamaindex/modules/data_stores/vector_stores/qdrant.mdx b/apps/next/src/content/docs/llamaindex/modules/data_stores/vector_stores/qdrant.mdx
index c5a363340..5b86585b1 100644
--- a/apps/next/src/content/docs/llamaindex/modules/data_stores/vector_stores/qdrant.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/data_stores/vector_stores/qdrant.mdx
@@ -11,11 +11,30 @@ docker pull qdrant/qdrant
 docker run -p 6333:6333 qdrant/qdrant
 ```
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/qdrant
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/qdrant
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/qdrant
+	```
+</Tabs>
+
 ## Importing the modules
 
 ```ts
 import fs from "node:fs/promises";
-import { Document, VectorStoreIndex, QdrantVectorStore } from "llamaindex";
+import { Document, VectorStoreIndex } from "llamaindex";
+import { QdrantVectorStore } from "@llamaindex/qdrant";
 ```
 
 ## Load the documents
@@ -60,7 +79,8 @@ console.log(response.toString());
 
 ```ts
 import fs from "node:fs/promises";
-import { Document, VectorStoreIndex, QdrantVectorStore } from "llamaindex";
+import { Document, VectorStoreIndex } from "llamaindex";
+import { QdrantVectorStore } from "@llamaindex/qdrant";
 
 async function main() {
   const path = "node_modules/llamaindex/examples/abramov.txt";
diff --git a/apps/next/src/content/docs/llamaindex/modules/documents_and_nodes/metadata_extraction.mdx b/apps/next/src/content/docs/llamaindex/modules/documents_and_nodes/metadata_extraction.mdx
index fbadb14a0..8f92a8c6b 100644
--- a/apps/next/src/content/docs/llamaindex/modules/documents_and_nodes/metadata_extraction.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/documents_and_nodes/metadata_extraction.mdx
@@ -14,13 +14,8 @@ Our metadata extractor modules include the following "feature extractors":
 Then you can chain the `Metadata Extractors` with the `IngestionPipeline` to extract metadata from a set of documents.
 
 ```ts
-import {
-  IngestionPipeline,
-  TitleExtractor,
-  QuestionsAnsweredExtractor,
-  Document,
-  OpenAI,
-} from "llamaindex";
+import { Document, IngestionPipeline, TitleExtractor, QuestionsAnsweredExtractor } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
 
 async function main() {
   const pipeline = new IngestionPipeline({
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/deepinfra.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/deepinfra.mdx
index 3cfd6c369..6993905a1 100644
--- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/deepinfra.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/deepinfra.mdx
@@ -5,13 +5,27 @@ title: DeepInfra
 To use DeepInfra embeddings, you need to import `DeepInfraEmbedding` from `@llamaindex/deepinfra`.
 Check out available embedding models [here](https://deepinfra.com/models/embeddings).
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/deepinfra
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/deepinfra
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/deepinfra
+	```
+</Tabs>
+
 ```ts
-import {
-  DeepInfraEmbedding,
-  Settings,
-  Document,
-  VectorStoreIndex,
-} from "llamaindex";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
+import { DeepInfraEmbedding } from "@llamaindex/deepinfra";
 
 // Update Embed Model
 Settings.embedModel = new DeepInfraEmbedding();
@@ -33,7 +47,7 @@ By default, DeepInfraEmbedding is using the sentence-transformers/clip-ViT-B-32
 For example:
 
 ```ts
-import { DeepInfraEmbedding } from "llamaindex";
+import { DeepInfraEmbedding } from "@llamaindex/deepinfra";
 
 const model = "intfloat/e5-large-v2";
 Settings.embedModel = new DeepInfraEmbedding({
@@ -46,7 +60,8 @@ You can also set the `maxRetries` and `timeout` parameters when initializing `De
 For example:
 
 ```ts
-import { DeepInfraEmbedding, Settings } from "llamaindex";
+import { Settings } from "llamaindex";
+import { DeepInfraEmbedding } from "@llamaindex/deepinfra";
 
 const model = "intfloat/e5-large-v2";
 const maxRetries = 5;
@@ -62,7 +77,7 @@ Settings.embedModel = new DeepInfraEmbedding({
 Standalone usage:
 
 ```ts
-import { DeepInfraEmbedding } from "llamaindex";
+import { DeepInfraEmbedding } from "@llamaindex/deepinfra";
 import { config } from "dotenv";
 // For standalone usage, you need to configure DEEPINFRA_API_TOKEN in .env file
 config();
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/gemini.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/gemini.mdx
index 074482cc4..36204a657 100644
--- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/gemini.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/gemini.mdx
@@ -2,10 +2,29 @@
 title: Gemini
 ---
 
-To use Gemini embeddings, you need to import `GeminiEmbedding` from `llamaindex`.
+To use Gemini embeddings, you need to import `GeminiEmbedding` from `@llamaindex/google`.
+
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/google
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/google
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/google
+	```
+</Tabs>
 
 ```ts
-import { GeminiEmbedding, Settings } from "llamaindex";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
+import { GeminiEmbedding, GEMINI_MODEL } from "@llamaindex/google";
 
 // Update Embed Model
 Settings.embedModel = new GeminiEmbedding();
@@ -27,7 +46,7 @@ Per default, `GeminiEmbedding` is using the `gemini-pro` model. You can change t
 For example:
 
 ```ts
-import { GEMINI_MODEL, GeminiEmbedding } from "llamaindex";
+import { GEMINI_MODEL, GeminiEmbedding } from "@llamaindex/google";
 
 Settings.embedModel = new GeminiEmbedding({
   model: GEMINI_MODEL.GEMINI_PRO_LATEST,
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/huggingface.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/huggingface.mdx
index 7b37de3a6..5fe36edf7 100644
--- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/huggingface.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/huggingface.mdx
@@ -2,10 +2,29 @@
 title: HuggingFace
 ---
 
-To use HuggingFace embeddings, you need to import `HuggingFaceEmbedding` from `llamaindex`.
+To use HuggingFace embeddings, you need to import `HuggingFaceEmbedding` from `@llamaindex/huggingface`.
+
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/huggingface
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/huggingface
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/huggingface
+	```
+</Tabs>
 
 ```ts
-import { HuggingFaceEmbedding, Settings } from "llamaindex";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
+import { HuggingFaceEmbedding } from "@llamaindex/huggingface";
 
 // Update Embed Model
 Settings.embedModel = new HuggingFaceEmbedding();
@@ -29,6 +48,8 @@ If you're not using a quantized model, set the `quantized` parameter to `false`.
 For example, to use the non-quantized `BAAI/bge-small-en-v1.5` model, you can use the following code:
 
 ```ts
+import { HuggingFaceEmbedding } from "@llamaindex/huggingface";
+
 Settings.embedModel = new HuggingFaceEmbedding({
   modelType: "BAAI/bge-small-en-v1.5",
   quantized: false,
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mistral.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mistral.mdx
index b7722640e..70b94b929 100644
--- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mistral.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mistral.mdx
@@ -2,10 +2,29 @@
 title: MistralAI
 ---
 
-To use MistralAI embeddings, you need to import `MistralAIEmbedding` from `llamaindex`.
+To use MistralAI embeddings, you need to import `MistralAIEmbedding` from `@llamaindex/mistral`.
+
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/mistral
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/mistral
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/mistral
+	```
+</Tabs>
 
 ```ts
-import { MistralAIEmbedding, Settings } from "llamaindex";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
+import { MistralAIEmbedding } from "@llamaindex/mistral";
 
 // Update Embed Model
 Settings.embedModel = new MistralAIEmbedding({
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mixedbreadai.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mixedbreadai.mdx
index f8395b58f..5983aeacd 100644
--- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mixedbreadai.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mixedbreadai.mdx
@@ -14,16 +14,28 @@ To find out more about the latest features, updates, and available models, visit
 
 ## Setup
 
-First, you will need to install the `llamaindex` package.
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/mixedbread
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/mixedbread
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/mixedbread
+	```
+</Tabs>
 
-```bash
-pnpm install llamaindex
-```
 
 Next, sign up for an API key at [mixedbread.ai](https://mixedbread.ai/). Once you have your API key, you can import the necessary modules and create a new instance of the `MixedbreadAIEmbeddings` class.
 
 ```ts
-import { MixedbreadAIEmbeddings, Document, Settings } from "llamaindex";
+import { MixedbreadAIEmbeddings } from "@llamaindex/mixedbread";
+import { Document, Settings } from "llamaindex";
 ```
 
 ## Usage with LlamaIndex
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/ollama.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/ollama.mdx
index b7e7eeb91..499536d35 100644
--- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/ollama.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/ollama.mdx
@@ -2,7 +2,7 @@
 title: Ollama
 ---
 
-To use Ollama embeddings, you need to import `OllamaEmbedding` from `llamaindex`.
+To use Ollama embeddings, you need to import `OllamaEmbedding` from `@llamaindex/ollama`.
 
 Note that you need to pull the embedding model before using it.
 
@@ -12,8 +12,27 @@ In the example below, we're using the [`nomic-embed-text`](https://ollama.com/li
 ollama pull nomic-embed-text
 ```
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/ollama
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/ollama
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/ollama
+	```
+</Tabs>
+
 ```ts
-import { OllamaEmbedding, Settings } from "llamaindex";
+import { OllamaEmbedding } from "@llamaindex/ollama";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 Settings.embedModel = new OllamaEmbedding({ model: "nomic-embed-text" });
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/openai.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/openai.mdx
index cf1ef52ba..d6fefb7bf 100644
--- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/openai.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/openai.mdx
@@ -2,10 +2,29 @@
 title: OpenAI
 ---
 
-To use OpenAI embeddings, you need to import `OpenAIEmbedding` from `llamaindex`.
+To use OpenAI embeddings, you need to import `OpenAIEmbedding` from `@llamaindex/openai`.
+
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai
+	```
+</Tabs>
 
 ```ts
-import { OpenAIEmbedding, Settings } from "llamaindex";
+import { OpenAIEmbedding } from "@llamaindex/openai";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 Settings.embedModel = new OpenAIEmbedding();
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx
index d1a911007..62649cc01 100644
--- a/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx
@@ -6,8 +6,27 @@ The embedding model in LlamaIndex is responsible for creating numerical represen
 
 This can be explicitly updated through `Settings`.
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai
+	```
+</Tabs>
+
 ```typescript
-import { OpenAIEmbedding, Settings } from "llamaindex";
+import { OpenAIEmbedding } from "@llamaindex/openai";
+import { Settings } from "llamaindex";
 
 Settings.embedModel = new OpenAIEmbedding({
   model: "text-embedding-ada-002",
diff --git a/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/correctness.mdx b/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/correctness.mdx
index 2e8359409..50cb3c856 100644
--- a/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/correctness.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/correctness.mdx
@@ -10,9 +10,21 @@ This is useful for measuring if the response was correct. The evaluator returns
 
 Firstly, you need to install the package:
 
-```bash
-pnpm i llamaindex
-```
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai
+	```
+</Tabs>
 
 Set the OpenAI API key:
 
@@ -23,7 +35,8 @@ export OPENAI_API_KEY=your-api-key
 Import the required modules:
 
 ```ts
-import { CorrectnessEvaluator, OpenAI, Settings, Response } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { CorrectnessEvaluator, Settings, Response } from "llamaindex";
 ```
 
 Let's set up gpt-4 for better results:
diff --git a/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/faithfulness.mdx b/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/faithfulness.mdx
index bf87bad80..a875b9c39 100644
--- a/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/faithfulness.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/faithfulness.mdx
@@ -12,9 +12,22 @@ This is useful for measuring if the response was hallucinated. The evaluator ret
 
 Firstly, you need to install the package:
 
-```bash
-pnpm i llamaindex
-```
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai
+	```
+</Tabs>
 
 Set the OpenAI API key:
 
@@ -25,12 +38,12 @@ export OPENAI_API_KEY=your-api-key
 Import the required modules:
 
 ```ts
+import { OpenAI } from "@llamaindex/openai";
 import {
   Document,
   FaithfulnessEvaluator,
-  OpenAI,
-  VectorStoreIndex,
   Settings,
+  VectorStoreIndex,
 } from "llamaindex";
 ```
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/relevancy.mdx b/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/relevancy.mdx
index 211998fb2..ec07979d6 100644
--- a/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/relevancy.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/relevancy.mdx
@@ -10,9 +10,22 @@ It is useful for measuring if the response was relevant to the query. The evalua
 
 Firstly, you need to install the package:
 
-```bash
-pnpm i llamaindex
-```
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai
+	```
+</Tabs>
 
 Set the OpenAI API key:
 
@@ -23,11 +36,11 @@ export OPENAI_API_KEY=your-api-key
 Import the required modules:
 
 ```ts
+import { OpenAI } from "@llamaindex/openai";
 import {
+  Document,
   RelevancyEvaluator,
-  OpenAI,
   Settings,
-  Document,
   VectorStoreIndex,
 } from "llamaindex";
 ```
diff --git a/apps/next/src/content/docs/llamaindex/modules/ingestion_pipeline/index.mdx b/apps/next/src/content/docs/llamaindex/modules/ingestion_pipeline/index.mdx
index 045e7c5cf..74c2eed16 100644
--- a/apps/next/src/content/docs/llamaindex/modules/ingestion_pipeline/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/ingestion_pipeline/index.mdx
@@ -5,18 +5,35 @@ title: Ingestion Pipeline
 An `IngestionPipeline` uses a concept of `Transformations` that are applied to your input data.
 The resulting nodes are either returned or, if a vector database is given, inserted into it.
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai @llamaindex/qdrant
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai @llamaindex/qdrant
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai @llamaindex/qdrant
+	```
+</Tabs>
+
 ## Usage Pattern
 
 The simplest usage is to instantiate an IngestionPipeline like so:
 
 ```ts
 import fs from "node:fs/promises";
-
+import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
 import {
   Document,
   IngestionPipeline,
   MetadataMode,
-  OpenAIEmbedding,
   TitleExtractor,
   SentenceSplitter,
 } from "llamaindex";
@@ -58,14 +75,14 @@ Then, you can construct an index from that vector store later on.
 ```ts
 import fs from "node:fs/promises";
 
+import { OpenAIEmbedding } from "@llamaindex/openai";
+import { QdrantVectorStore } from "@llamaindex/qdrant";
 import {
   Document,
   IngestionPipeline,
   MetadataMode,
-  OpenAIEmbedding,
   TitleExtractor,
   SentenceSplitter,
-  QdrantVectorStore,
   VectorStoreIndex,
 } from "llamaindex";
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/anthropic.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/anthropic.mdx
index 7b508fa47..b96aca404 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/anthropic.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/anthropic.mdx
@@ -2,10 +2,29 @@
 title: Anthropic
 ---
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/anthropic
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/anthropic
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/anthropic
+	```
+</Tabs>
+
 ## Usage
 
 ```ts
-import { Anthropic, Settings } from "llamaindex";
+import { Settings } from "llamaindex";
+import { Anthropic } from "@llamaindex/anthropic";
 
 Settings.llm = new Anthropic({
   apiKey: "<YOUR_API_KEY>",
@@ -37,7 +56,8 @@ const results = await queryEngine.query({
 ## Full Example
 
 ```ts
-import { Anthropic, Document, VectorStoreIndex, Settings } from "llamaindex";
+import { Document, VectorStoreIndex, Settings } from "llamaindex";
+import { Anthropic } from "@llamaindex/anthropic";
 
 Settings.llm = new Anthropic({
   apiKey: "<YOUR_API_KEY>",
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/azure.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/azure.mdx
index e43cf39f1..3c0231841 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/azure.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/azure.mdx
@@ -14,10 +14,29 @@ export AZURE_OPENAI_ENDPOINT="<YOUR ENDPOINT, see https://learn.microsoft.com/en
 export AZURE_OPENAI_DEPLOYMENT="gpt-4" # or some other deployment name
 ```
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai
+	```
+</Tabs>
+
 ## Usage
 
 ```ts
-import { OpenAI, Settings } from "llamaindex";
+import { Settings } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
 
 Settings.llm = new OpenAI({ model: "gpt-4", temperature: 0 });
 ```
@@ -47,7 +66,8 @@ const results = await queryEngine.query({
 ## Full Example
 
 ```ts
-import { OpenAI, Document, VectorStoreIndex, Settings } from "llamaindex";
+import { Document, VectorStoreIndex, Settings } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
 
 Settings.llm = new OpenAI({ model: "gpt-4", temperature: 0 });
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/bedrock.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/bedrock.mdx
index ba9f52c14..5c926550d 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/bedrock.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/bedrock.mdx
@@ -2,6 +2,24 @@
 title: Bedrock
 ---
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/community
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/community
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/community
+	```
+</Tabs>
+
 ## Usage
 
 ```ts
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepinfra.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepinfra.mdx
index 7a82ffeee..ecd5f10d7 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepinfra.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepinfra.mdx
@@ -4,8 +4,27 @@ title: DeepInfra
 
 Check out available LLMs [here](https://deepinfra.com/models/text-generation).
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/deepinfra
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/deepinfra
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/deepinfra
+	```
+</Tabs>
+
 ```ts
-import { DeepInfra, Settings } from "llamaindex";
+import { DeepInfra } from "@llamaindex/deepinfra";
+import { Settings } from "llamaindex";
 
 // Get the API key from `DEEPINFRA_API_TOKEN` environment variable
 import { config } from "dotenv";
@@ -28,6 +47,8 @@ export DEEPINFRA_API_TOKEN="<YOUR_API_KEY>"
 For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
 
 ```ts
+import { Document, VectorStoreIndex } from "llamaindex";
+
 const document = new Document({ text: essay, id_: "essay" });
 
 const index = await VectorStoreIndex.fromDocuments([document]);
@@ -48,7 +69,8 @@ const results = await queryEngine.query({
 ## Full Example
 
 ```ts
-import { DeepInfra, Document, VectorStoreIndex, Settings } from "llamaindex";
+import { DeepInfra } from "@llamaindex/deepinfra";
+import { Document, VectorStoreIndex, Settings } from "llamaindex";
 
 // Use custom LLM
 const model = "meta-llama/Meta-Llama-3-8B-Instruct";
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/gemini.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/gemini.mdx
index 0f981b678..8a77cbca9 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/gemini.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/gemini.mdx
@@ -2,10 +2,29 @@
 title: Gemini
 ---
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/google
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/google
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/google
+	```
+</Tabs>
+
 ## Usage
 
 ```ts
-import { Gemini, Settings, GEMINI_MODEL } from "llamaindex";
+import { Gemini, GEMINI_MODEL } from "@llamaindex/google";
+import { Settings } from "llamaindex";
 
 Settings.llm = new Gemini({
   model: GEMINI_MODEL.GEMINI_PRO,
@@ -19,7 +38,7 @@ To use Gemini via Vertex AI you can use `GeminiVertexSession`.
 `GeminiVertexSession` accepts the environment variables `GOOGLE_VERTEX_LOCATION` and `GOOGLE_VERTEX_PROJECT`.
 
 ```ts
-import { Gemini, GEMINI_MODEL, GeminiVertexSession } from "llamaindex";
+import { Gemini, GEMINI_MODEL, GeminiVertexSession } from "@llamaindex/google";
 
 const gemini = new Gemini({
   model: GEMINI_MODEL.GEMINI_PRO,
@@ -47,6 +66,8 @@ To authenticate for production you'll have to use a [service account](https://cl
 For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
 
 ```ts
+import { Document, VectorStoreIndex } from "llamaindex";
+
 const document = new Document({ text: essay, id_: "essay" });
 
 const index = await VectorStoreIndex.fromDocuments([document]);
@@ -67,13 +88,8 @@ const results = await queryEngine.query({
 ## Full Example
 
 ```ts
-import {
-  Gemini,
-  Document,
-  VectorStoreIndex,
-  Settings,
-  GEMINI_MODEL,
-} from "llamaindex";
+import { Gemini, GEMINI_MODEL } from "@llamaindex/google";
+import { Document, VectorStoreIndex, Settings } from "llamaindex";
 
 Settings.llm = new Gemini({
   model: GEMINI_MODEL.GEMINI_PRO,
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx
index b2bcfcedd..a570a1fef 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx
@@ -5,6 +5,24 @@ title: Groq
 import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
 import CodeSource from "!raw-loader!../../../../../../../../../examples/groq.ts";
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/groq
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/groq
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/groq
+	```
+</Tabs>
+
 ## Usage
 
 First, create an API key at the [Groq Console](https://console.groq.com/keys). Then save it in your environment:
@@ -16,7 +34,8 @@ export GROQ_API_KEY=<your-api-key>
 Then initialize the Groq module.
 
 ```ts
-import { Groq, Settings } from "llamaindex";
+import { Groq } from "@llamaindex/groq";
+import { Settings } from "llamaindex";
 
 Settings.llm = new Groq({
   // If you do not wish to set your API key in the environment, you may
@@ -30,6 +49,8 @@ Settings.llm = new Groq({
 For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
 
 ```ts
+import { Document, VectorStoreIndex } from "llamaindex";
+
 const document = new Document({ text: essay, id_: "essay" });
 
 const index = await VectorStoreIndex.fromDocuments([document]);
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/llama2.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/llama2.mdx
index 47202a66d..ac74c4ebd 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/llama2.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/llama2.mdx
@@ -2,10 +2,29 @@
 title: Llama2
 ---
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/replicate
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/replicate
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/replicate
+	```
+</Tabs>
+
 ## Usage
 
 ```ts
-import { Ollama, Settings, DeuceChatStrategy } from "llamaindex";
+import { LlamaDeuce, DeuceChatStrategy } from "@llamaindex/replicate";
+import { Settings } from "llamaindex";
 
 Settings.llm = new LlamaDeuce({ chatStrategy: DeuceChatStrategy.META });
 ```
@@ -13,12 +32,8 @@ Settings.llm = new LlamaDeuce({ chatStrategy: DeuceChatStrategy.META });
 ## Usage with Replicate
 
 ```ts
-import {
-  Ollama,
-  ReplicateSession,
-  Settings,
-  DeuceChatStrategy,
-} from "llamaindex";
+import { Settings } from "llamaindex";
+import { LlamaDeuce, DeuceChatStrategy, ReplicateSession } from "@llamaindex/replicate";
 
 const replicateSession = new ReplicateSession({
   replicateKey,
@@ -55,13 +70,8 @@ const results = await queryEngine.query({
 ## Full Example
 
 ```ts
-import {
-  LlamaDeuce,
-  Document,
-  VectorStoreIndex,
-  Settings,
-  DeuceChatStrategy,
-} from "llamaindex";
+import { LlamaDeuce, DeuceChatStrategy } from "@llamaindex/replicate";
+import { Document, VectorStoreIndex, Settings } from "llamaindex";
 
 // Use the LlamaDeuce LLM
 Settings.llm = new LlamaDeuce({ chatStrategy: DeuceChatStrategy.META });
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/mistral.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/mistral.mdx
index ef25de1e7..1ec50677b 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/mistral.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/mistral.mdx
@@ -2,10 +2,29 @@
 title: Mistral
 ---
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/mistral
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/mistral
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/mistral
+	```
+</Tabs>
+
 ## Usage
 
 ```ts
-import { MistralAI, Settings } from "llamaindex";
+import { MistralAI } from "@llamaindex/mistral";
+import { Settings } from "llamaindex";
 
 Settings.llm = new MistralAI({
   model: "mistral-tiny",
@@ -18,6 +37,8 @@ Settings.llm = new MistralAI({
 For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
 
 ```ts
+import { Document, VectorStoreIndex } from "llamaindex";
+
 const document = new Document({ text: essay, id_: "essay" });
 
 const index = await VectorStoreIndex.fromDocuments([document]);
@@ -38,7 +59,8 @@ const results = await queryEngine.query({
 ## Full Example
 
 ```ts
-import { MistralAI, Document, VectorStoreIndex, Settings } from "llamaindex";
+import { MistralAI } from "@llamaindex/mistral";
+import { Document, VectorStoreIndex, Settings } from "llamaindex";
 
 // Use the MistralAI LLM
 Settings.llm = new MistralAI({ model: "mistral-tiny" });
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/ollama.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/ollama.mdx
index 81e1eddbe..ed3161f24 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/ollama.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/ollama.mdx
@@ -2,10 +2,30 @@
 title: Ollama
 ---
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/ollama
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/ollama
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/ollama
+	```
+</Tabs>
+
 ## Usage
 
 ```ts
-import { Ollama, Settings } from "llamaindex";
+import { Ollama } from "@llamaindex/ollama";
+import { Settings } from "llamaindex";
 
+// create an Ollama instance (the model name here is just an example)
+const ollamaLLM = new Ollama({ model: "llama3" });
+
 Settings.llm = ollamaLLM;
 Settings.embedModel = ollamaLLM;
@@ -16,6 +36,8 @@ Settings.embedModel = ollamaLLM;
 For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
 
 ```ts
+import { Document, VectorStoreIndex } from "llamaindex";
+
 const document = new Document({ text: essay, id_: "essay" });
 
 const index = await VectorStoreIndex.fromDocuments([document]);
@@ -36,7 +58,8 @@ const results = await queryEngine.query({
 ## Full Example
 
 ```ts
-import { Ollama, Document, VectorStoreIndex, Settings } from "llamaindex";
+import { Ollama } from "@llamaindex/ollama";
+import { Document, VectorStoreIndex, Settings } from "llamaindex";
 
 import fs from "fs/promises";
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/openai.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/openai.mdx
index 7211bd6c6..9afdba1cc 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/openai.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/openai.mdx
@@ -2,8 +2,28 @@
 title: OpenAI
 ---
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai
+	```
+</Tabs>
+
 ```ts
-import { OpenAI, Settings } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { Settings } from "llamaindex";
 
 Settings.llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0, apiKey: "<YOUR_API_KEY>" });
 ```
@@ -19,6 +39,8 @@ export OPENAI_API_KEY="<YOUR_API_KEY>"
 For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
 
 ```ts
+import { Document, VectorStoreIndex } from "llamaindex";
+
 const document = new Document({ text: essay, id_: "essay" });
 
 const index = await VectorStoreIndex.fromDocuments([document]);
@@ -39,7 +61,8 @@ const results = await queryEngine.query({
 ## Full Example
 
 ```ts
-import { OpenAI, Document, VectorStoreIndex, Settings } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 // Use the OpenAI LLM
 Settings.llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/portkey.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/portkey.mdx
index b199afcbf..efd003e4f 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/portkey.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/portkey.mdx
@@ -2,10 +2,30 @@
 title: Portkey LLM
 ---
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/portkey-ai
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/portkey-ai
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/portkey-ai
+	```
+</Tabs>
+
 ## Usage
 
 ```ts
-import { Portkey, Settings } from "llamaindex";
+import { Portkey } from "@llamaindex/portkey-ai";
+import { Settings } from "llamaindex";
 
 Settings.llm = new Portkey({
   apiKey: "<YOUR_API_KEY>",
@@ -17,6 +37,8 @@ Settings.llm = new Portkey({
 For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
 
 ```ts
+import { Document, VectorStoreIndex } from "llamaindex";
+
 const document = new Document({ text: essay, id_: "essay" });
 
 const index = await VectorStoreIndex.fromDocuments([document]);
@@ -37,7 +59,8 @@ const results = await queryEngine.query({
 ## Full Example
 
 ```ts
-import { Portkey, Document, VectorStoreIndex, Settings } from "llamaindex";
+import { Portkey } from "@llamaindex/portkey-ai";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 // Use the Portkey LLM
 Settings.llm = new Portkey({
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx
index 6e395bc3a..65cc58c41 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx
@@ -2,10 +2,28 @@
 title: Together LLM
 ---
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/together
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/together
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/together
+	```
+</Tabs>
+
 ## Usage
 
 ```ts
-import { TogetherLLM, Settings } from "llamaindex";
+import { TogetherLLM } from "@llamaindex/together";
+import { Settings } from "llamaindex";
 
 Settings.llm = new TogetherLLM({
   apiKey: "<YOUR_API_KEY>",
@@ -17,6 +35,8 @@ Settings.llm = new TogetherLLM({
 For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
 
 ```ts
+import { Document, VectorStoreIndex } from "llamaindex";
+
 const document = new Document({ text: essay, id_: "essay" });
 
 const index = await VectorStoreIndex.fromDocuments([document]);
@@ -37,7 +57,8 @@ const results = await queryEngine.query({
 ## Full Example
 
 ```ts
-import { TogetherLLM, Document, VectorStoreIndex, Settings } from "llamaindex";
+import { TogetherLLM } from "@llamaindex/together";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 Settings.llm = new TogetherLLM({
   apiKey: "<YOUR_API_KEY>",
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx
index 7fecb26f9..1ee9b636d 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx
@@ -2,12 +2,31 @@
 title: Large Language Models (LLMs)
 ---
 
-The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
+The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-4o`.
 
 The LLM can be explicitly updated through `Settings`.
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai
+	```
+</Tabs>
+
 ```typescript
-import { OpenAI, Settings } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { Settings } from "llamaindex";
 
 Settings.llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
 ```
diff --git a/apps/next/src/content/docs/llamaindex/modules/node_parser.mdx b/apps/next/src/content/docs/llamaindex/modules/node_parser.mdx
index 5f9af92c7..2ce4d57f1 100644
--- a/apps/next/src/content/docs/llamaindex/modules/node_parser.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/node_parser.mdx
@@ -5,7 +5,8 @@ title: NodeParser
 The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `Settings` is used to do this automatically for you. Alternatively, you can use it to split documents ahead of time.
 
 ```typescript
-import { Document, SentenceSplitter } from "llamaindex";
+import { Document } from "llamaindex";
+import { SentenceSplitter } from "llamaindex";
 
 const nodeParser = new SentenceSplitter();
 
@@ -30,6 +31,7 @@ The `MarkdownNodeParser` is a more advanced `NodeParser` that can handle markdow
 
 ```typescript
 import { MarkdownNodeParser } from "llamaindex";
+import { Document } from "llamaindex";
 
 const nodeParser = new MarkdownNodeParser();
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/cohere_reranker.mdx b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/cohere_reranker.mdx
index a64cea55c..b121c4eb9 100644
--- a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/cohere_reranker.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/cohere_reranker.mdx
@@ -8,20 +8,28 @@ The Cohere Reranker is a postprocessor that uses the Cohere API to rerank the re
 
 First, you will need to install the necessary packages:
 
-```bash
-pnpm install llamaindex
-```
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/cohere @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/cohere @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/cohere @llamaindex/openai
+	```
+</Tabs>
 
 Now, you will need to sign up for an API key at [Cohere](https://cohere.ai/). Once you have your API key, you can import the necessary modules and create a new instance of the `CohereRerank` class.
 
 ```ts
-import {
-  CohereRerank,
-  Document,
-  OpenAI,
-  VectorStoreIndex,
-  Settings,
-} from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { CohereRerank } from "@llamaindex/cohere";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 ```
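+
+As a quick sketch, you can instantiate the reranker with your API key and the number of top results to keep (the same `apiKey` and `topN` options appear in the node postprocessors guide):
+
+```ts
+// rerank retrieved nodes with Cohere, keeping the 2 best matches
+const reranker = new CohereRerank({
+  apiKey: "<COHERE_API_KEY>",
+  topN: 2,
+});
+```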
 
 ## Load and index documents
diff --git a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/index.mdx b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/index.mdx
index 8a6cd35f6..8ef8c7969 100644
--- a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/index.mdx
@@ -2,6 +2,24 @@
 title: Node Postprocessors
 ---
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/cohere @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/cohere @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/cohere @llamaindex/openai
+	```
+</Tabs>
+
 ## Concept
 
 Node postprocessors are modules that take a set of nodes and apply a transformation or filtering to them before returning them.
@@ -15,12 +33,8 @@ LlamaIndex offers several node postprocessors for immediate use, while also prov
 An example of using a node postprocessor is below:
 
 ```ts
-import {
-  Node,
-  NodeWithScore,
-  SimilarityPostprocessor,
-  CohereRerank,
-} from "llamaindex";
+import { CohereRerank } from "@llamaindex/cohere";
+import { Node, NodeWithScore, SimilarityPostprocessor, TextNode } from "llamaindex";
 
 const nodes: NodeWithScore[] = [
   {
@@ -60,7 +74,9 @@ Most commonly, node-postprocessors will be used in a query engine, where they ar
 ### Using Node Postprocessors in a Query Engine
 
 ```ts
-import { Node, NodeWithScore, SimilarityPostprocessor, CohereRerank, Settings } from "llamaindex";
+import { CohereRerank } from "@llamaindex/cohere";
+import { OpenAI } from "@llamaindex/openai";
+import { Document, Node, NodeWithScore, SimilarityPostprocessor, Settings, TextNode } from "llamaindex";
 
 // Use OpenAI LLM
 Settings.llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0.1 });
@@ -78,9 +94,9 @@ const nodes: NodeWithScore[] = [
 
 // cohere rerank: rerank nodes given query using trained model
 const reranker = new CohereRerank({
-  apiKey: "<COHERE_API_KEY>,
+  apiKey: "<COHERE_API_KEY>",
   topN: 2,
-})
+});
 
 const document = new Document({ text: "essay", id_: "essay" });
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/jinaai_reranker.mdx b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/jinaai_reranker.mdx
index a43145668..4856157b0 100644
--- a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/jinaai_reranker.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/jinaai_reranker.mdx
@@ -8,20 +8,28 @@ The Jina AI Reranker is a postprocessor that uses the Jina AI Reranker API to re
 
 First, you will need to install the necessary packages:
 
-```bash
-pnpm install llamaindex
-```
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai
+	```
+</Tabs>
 
 Now, you will need to sign up for an API key at [Jina AI](https://jina.ai/reranker). Once you have your API key, you can import the necessary modules and create a new instance of the `JinaAIReranker` class.
 
 ```ts
-import {
-  JinaAIReranker,
-  Document,
-  OpenAI,
-  VectorStoreIndex,
-  Settings,
-} from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { Document, Settings, VectorStoreIndex, JinaAIReranker } from "llamaindex";
 ```
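+
+A minimal sketch of creating the reranker (assuming the constructor accepts a `topN` option and picks up your Jina API key from the environment; verify against the class docs):
+
+```ts
+// rerank retrieved nodes with Jina AI, keeping the 5 best matches
+const reranker = new JinaAIReranker({ topN: 5 });
+```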
 
 ## Load and index documents
diff --git a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/mixedbreadiai_reranker.mdx b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/mixedbreadiai_reranker.mdx
index 1a13a7ccb..2561812f8 100644
--- a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/mixedbreadiai_reranker.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/mixedbreadiai_reranker.mdx
@@ -17,20 +17,33 @@ To find out more about the latest features and updates, visit the [mixedbread.ai
 
 First, you will need to install the necessary packages:
 
-```bash
-pnpm install llamaindex
-```
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai @llamaindex/mixedbread
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai @llamaindex/mixedbread
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai @llamaindex/mixedbread
+	```
+</Tabs>
 
 Next, sign up for an API key at [mixedbread.ai](https://mixedbread.ai/). Once you have your API key, you can import the necessary modules and create a new instance of the `MixedbreadAIReranker` class.
 
 ```ts
 import {
-  MixedbreadAIReranker,
   Document,
-  OpenAI,
   VectorStoreIndex,
   Settings,
 } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { MixedbreadAIReranker } from "@llamaindex/mixedbread";
 ```
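+
+A minimal sketch of creating the reranker (assuming the constructor accepts `apiKey` and `topN` options):
+
+```ts
+// rerank retrieved nodes with mixedbread.ai, keeping the 4 best matches
+const reranker = new MixedbreadAIReranker({
+  apiKey: "<MXBAI_API_KEY>",
+  topN: 4,
+});
+```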
 
 ## Usage with LlamaIndex
diff --git a/apps/next/src/content/docs/llamaindex/modules/query_engines/metadata_filtering.mdx b/apps/next/src/content/docs/llamaindex/modules/query_engines/metadata_filtering.mdx
index 480edbf1e..937a4234b 100644
--- a/apps/next/src/content/docs/llamaindex/modules/query_engines/metadata_filtering.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/query_engines/metadata_filtering.mdx
@@ -10,19 +10,27 @@ You can also check our multi-tenancy blog post to see how metadata filtering can
 
 First, if you haven't already, install the necessary packages:
 
-```bash
-pnpm i llamaindex
-```
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai @llamaindex/chroma
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai @llamaindex/chroma
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai @llamaindex/chroma
+	```
+</Tabs>
 
 Then you can import the necessary modules:
 
 ```ts
-import {
-  ChromaVectorStore,
-  Document,
-  VectorStoreIndex,
-  storageContextFromDefaults,
-} from "llamaindex";
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+import { ChromaVectorStore } from "@llamaindex/chroma";
 
 const collectionName = "dog_colors";
 ```
@@ -95,12 +103,8 @@ Besides using the equal operator (`==`), you can also use a whole set of differe
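+For instance, a sketch using the `in` operator (assuming `index` is the `VectorStoreIndex` created above; operator support varies by vector store, so treat the exact shape as an assumption to verify):
+
+```ts
+const queryEngine = index.asQueryEngine({
+  preFilters: {
+    filters: [
+      // match nodes whose "dogId" metadata equals any of the listed values
+      { key: "dogId", value: ["1", "3"], operator: "in" },
+    ],
+  },
+});
+```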
 ## Full Code
 
 ```ts
-import {
-  ChromaVectorStore,
-  Document,
-  VectorStoreIndex,
-  storageContextFromDefaults,
-} from "llamaindex";
+import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
+import { ChromaVectorStore } from "@llamaindex/chroma";
 
 const collectionName = "dog_colors";
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/query_engines/router_query_engine.mdx b/apps/next/src/content/docs/llamaindex/modules/query_engines/router_query_engine.mdx
index 0cd4dc000..5322ecbf2 100644
--- a/apps/next/src/content/docs/llamaindex/modules/query_engines/router_query_engine.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/query_engines/router_query_engine.mdx
@@ -8,13 +8,24 @@ In this tutorial, we define a custom router query engine that selects one out of
 
 First, we need to install the necessary packages:
 
-```bash
-pnpm i lamaindex
-```
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install llamaindex @llamaindex/openai @llamaindex/readers
+	```
+
+	```shell tab="yarn"
+	yarn add llamaindex @llamaindex/openai @llamaindex/readers
+	```
+
+	```shell tab="pnpm"
+	pnpm add llamaindex @llamaindex/openai @llamaindex/readers
+	```
+</Tabs>
 
 ```ts
 import {
-  OpenAI,
   RouterQueryEngine,
-  SimpleDirectoryReader,
   SentenceSplitter,
@@ -22,6 +33,8 @@ import {
   VectorStoreIndex,
   Settings,
 } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
 ```
 
 ## Loading Data
@@ -103,7 +116,6 @@ console.log({
 
 ```ts
 import {
-  OpenAI,
   RouterQueryEngine,
-  SimpleDirectoryReader,
   SentenceSplitter,
@@ -111,6 +123,8 @@ import {
   VectorStoreIndex,
   Settings,
 } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
 
 Settings.llm = new OpenAI();
 Settings.nodeParser = new SentenceSplitter({
diff --git a/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx b/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx
index 88e350930..bda0d53bf 100644
--- a/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx
@@ -18,7 +18,7 @@ The ResponseSynthesizer is responsible for sending the query, nodes, and prompt
   chunk.
 
 ```typescript
-import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex";
+import { NodeWithScore, TextNode, ResponseSynthesizer } from "llamaindex";
 
 const responseSynthesizer = new ResponseSynthesizer();
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/workflows.mdx b/apps/next/src/content/docs/llamaindex/modules/workflows.mdx
index 63f176555..c1938b2e4 100644
--- a/apps/next/src/content/docs/llamaindex/modules/workflows.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/workflows.mdx
@@ -116,7 +116,7 @@ const jokeFlow = new Workflow({ verbose: true, validate: true });
 Optionally, you can choose to use global context between steps. For example, maybe multiple steps access the original `query` input from the user. You can store this in global context so that every step has access.
 
 ```typescript
-import { Context } from "@llamaindex/core/workflow";
+import { Context } from "llamaindex";
 
 const query = async (context: Context, ev: MyEvent) => {
   // get the query from the context
diff --git a/e2e/examples/llama-parse-browser/src/main.ts b/e2e/examples/llama-parse-browser/src/main.ts
index 4362227c9..acf58b1be 100644
--- a/e2e/examples/llama-parse-browser/src/main.ts
+++ b/e2e/examples/llama-parse-browser/src/main.ts
@@ -1,4 +1,4 @@
-import { LlamaParseReader } from "@llamaindex/cloud/reader";
+import { LlamaParseReader } from "@llamaindex/cloud";
 import "./style.css";
 
 new LlamaParseReader();
diff --git a/examples/Settings.ts b/examples/Settings.ts
index 47778d757..bf1de4317 100644
--- a/examples/Settings.ts
+++ b/examples/Settings.ts
@@ -1,6 +1,7 @@
 import fs from "node:fs/promises";
 
-import { Document, OpenAI, Settings, VectorStoreIndex } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 Settings.llm = new OpenAI({ model: "gpt-4" });
 
diff --git a/examples/agent/large_toolcall.ts b/examples/agent/large_toolcall.ts
index e558e2dc5..3cb677465 100644
--- a/examples/agent/large_toolcall.ts
+++ b/examples/agent/large_toolcall.ts
@@ -1,4 +1,5 @@
-import { FunctionTool, OpenAI, OpenAIAgent } from "llamaindex";
+import { OpenAI, OpenAIAgent } from "@llamaindex/openai";
+import { FunctionTool } from "llamaindex";
 
 const csvData =
   "TITLE,RELEASE_YEAR,SCORE,NUMBER_OF_VOTES,DURATION,MAIN_GENRE,MAIN_PRODUCTION\nDavid Attenborough: A Life on Our Planet,2020,9,31180,83,documentary,GB\nInception,2010,8.8,2268288,148,scifi,GB\nForrest Gump,1994,8.8,1994599,142,drama,US\nAnbe Sivam,2003,8.7,20595,160,comedy,IN\nBo Burnham: Inside,2021,8.7,44074,87,comedy,US\nSaving Private Ryan,1998,8.6,1346020,169,drama,US\nDjango Unchained,2012,8.4,1472668,165,western,US\nDangal,2016,8.4,180247,161,action,IN\nBo Burnham: Make Happy,2016,8.4,14356,60,comedy,US\nLouis C.K.: Hilarious,2010,8.4,11973,84,comedy,US\nDave Chappelle: Sticks & Stones,2019,8.4,25687,65,comedy,US\n3 Idiots,2009,8.4,385782,170,comedy,IN\nBlack Friday,2004,8.4,20611,143,crime,IN\nSuper Deluxe,2019,8.4,13680,176,thriller,IN\nWinter on Fire: Ukraine's Fight for Freedom,2015,8.3,17710,98,documentary,UA\nOnce Upon a Time in America,1984,8.3,342335,229,drama,US\nTaxi Driver,1976,8.3,795222,113,crime,US\nLike Stars on Earth,2007,8.3,188234,165,drama,IN\nBo Burnham: What.,2013,8.3,11488,60,comedy,US\nFull Metal Jacket,1987,8.3,723306,116,drama,GB\nWarrior,2011,8.2,463276,140,drama,US\nDrishyam,2015,8.2,79075,163,thriller,IN\nQueen,2014,8.2,64805,146,drama,IN\nPaan Singh Tomar,2012,8.2,35888,135,drama,IN";
diff --git a/examples/agent/large_toolcall_with_gpt4o.ts b/examples/agent/large_toolcall_with_gpt4o.ts
index 522adf1e2..bce2d12f9 100644
--- a/examples/agent/large_toolcall_with_gpt4o.ts
+++ b/examples/agent/large_toolcall_with_gpt4o.ts
@@ -1,4 +1,5 @@
-import { FunctionTool, OpenAI, ToolCallOptions } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { FunctionTool, ToolCallOptions } from "llamaindex";
 
 (async () => {
   // The tool call will generate a partial JSON for `gpt-4-turbo`
diff --git a/examples/agent/multi_document_agent.ts b/examples/agent/multi_document_agent.ts
index 4cdd66a75..ae69772bd 100644
--- a/examples/agent/multi_document_agent.ts
+++ b/examples/agent/multi_document_agent.ts
@@ -1,10 +1,9 @@
 import fs from "node:fs/promises";
 
+import { OpenAI, OpenAIAgent } from "@llamaindex/openai";
 import {
   Document,
   ObjectIndex,
-  OpenAI,
-  OpenAIAgent,
   QueryEngineTool,
   SentenceSplitter,
   Settings,
diff --git a/examples/agent/openai-task.ts b/examples/agent/openai-task.ts
index 22a602d93..840b4fe64 100644
--- a/examples/agent/openai-task.ts
+++ b/examples/agent/openai-task.ts
@@ -1,4 +1,5 @@
-import { ChatResponseChunk, OpenAIAgent } from "llamaindex";
+import { OpenAIAgent } from "@llamaindex/openai";
+import { ChatResponseChunk } from "llamaindex";
 import {
   getCurrentIDTool,
   getUserInfoTool,
diff --git a/examples/agent/openai.ts b/examples/agent/openai.ts
index a0780b7c7..cd44ac5ee 100644
--- a/examples/agent/openai.ts
+++ b/examples/agent/openai.ts
@@ -1,4 +1,5 @@
-import { FunctionTool, OpenAIAgent } from "llamaindex";
+import { OpenAIAgent } from "@llamaindex/openai";
+import { FunctionTool } from "llamaindex";
 
 const sumNumbers = FunctionTool.from(
   ({ a, b }: { a: number; b: number }) => `${a + b}`,
diff --git a/examples/agent/query_openai_agent.ts b/examples/agent/query_openai_agent.ts
index dc8b63b9f..7ad7d0760 100644
--- a/examples/agent/query_openai_agent.ts
+++ b/examples/agent/query_openai_agent.ts
@@ -1,5 +1,5 @@
+import { OpenAIAgent } from "@llamaindex/openai";
 import {
-  OpenAIAgent,
   QueryEngineTool,
   SimpleDirectoryReader,
   VectorStoreIndex,
diff --git a/examples/agent/react_agent.ts b/examples/agent/react_agent.ts
index bb094237f..504a4615e 100644
--- a/examples/agent/react_agent.ts
+++ b/examples/agent/react_agent.ts
@@ -1,4 +1,5 @@
-import { Anthropic, FunctionTool, ReActAgent } from "llamaindex";
+import { Anthropic } from "@llamaindex/anthropic";
+import { FunctionTool, ReActAgent } from "llamaindex";
 
 // Define a function to sum two numbers
 function sumNumbers({ a, b }: { a: number; b: number }) {
diff --git a/examples/agent/retriever_openai_agent.ts b/examples/agent/retriever_openai_agent.ts
index e9e5d84aa..a3b196285 100644
--- a/examples/agent/retriever_openai_agent.ts
+++ b/examples/agent/retriever_openai_agent.ts
@@ -1,8 +1,8 @@
+import { OpenAIAgent } from "@llamaindex/openai";
 import {
   FunctionTool,
   MetadataMode,
   NodeWithScore,
-  OpenAIAgent,
   SimpleDirectoryReader,
   VectorStoreIndex,
 } from "llamaindex";
diff --git a/examples/agent/step_wise_query_tool.ts b/examples/agent/step_wise_query_tool.ts
index 829d35f26..c3d0f575d 100644
--- a/examples/agent/step_wise_query_tool.ts
+++ b/examples/agent/step_wise_query_tool.ts
@@ -1,5 +1,5 @@
+import { OpenAIAgent } from "@llamaindex/openai";
 import {
-  OpenAIAgent,
   QueryEngineTool,
   SimpleDirectoryReader,
   VectorStoreIndex,
diff --git a/examples/agent/step_wise_react.ts b/examples/agent/step_wise_react.ts
index ae1cdf751..e6d751b47 100644
--- a/examples/agent/step_wise_react.ts
+++ b/examples/agent/step_wise_react.ts
@@ -1,4 +1,5 @@
-import { Anthropic, FunctionTool, ReActAgent } from "llamaindex";
+import { Anthropic } from "@llamaindex/anthropic";
+import { FunctionTool, ReActAgent } from "llamaindex";
 
 // Define a function to sum two numbers
 function sumNumbers({ a, b }: { a: number; b: number }) {
diff --git a/examples/agent/stream_openai_agent.ts b/examples/agent/stream_openai_agent.ts
index 4d8d6e8fc..7deea96ad 100644
--- a/examples/agent/stream_openai_agent.ts
+++ b/examples/agent/stream_openai_agent.ts
@@ -1,4 +1,5 @@
-import { FunctionTool, OpenAIAgent } from "llamaindex";
+import { OpenAIAgent } from "@llamaindex/openai";
+import { FunctionTool } from "llamaindex";
 
 // Define a function to sum two numbers
 function sumNumbers({ a, b }: { a: number; b: number }) {
diff --git a/examples/agent/wiki.ts b/examples/agent/wiki.ts
index 64d556fe1..d09e8149e 100644
--- a/examples/agent/wiki.ts
+++ b/examples/agent/wiki.ts
@@ -1,4 +1,4 @@
-import { OpenAI, OpenAIAgent } from "llamaindex";
+import { OpenAI, OpenAIAgent } from "@llamaindex/openai";
 import { WikipediaTool } from "../wiki";
 
 async function main() {
diff --git a/examples/anthropic/agent.ts b/examples/anthropic/agent.ts
index 8892dc40e..5d146e104 100644
--- a/examples/anthropic/agent.ts
+++ b/examples/anthropic/agent.ts
@@ -1,5 +1,5 @@
-import { Anthropic, FunctionTool, Settings } from "llamaindex";
-import { AnthropicAgent } from "llamaindex/agent/anthropic";
+import { Anthropic, AnthropicAgent } from "@llamaindex/anthropic";
+import { FunctionTool, Settings } from "llamaindex";
 import { WikipediaTool } from "../wiki";
 
 Settings.callbackManager.on("llm-tool-call", (event) => {
diff --git a/examples/anthropic/chat.ts b/examples/anthropic/chat.ts
index 48117f891..5663d91ea 100644
--- a/examples/anthropic/chat.ts
+++ b/examples/anthropic/chat.ts
@@ -1,4 +1,4 @@
-import { Anthropic } from "llamaindex";
+import { Anthropic } from "@llamaindex/anthropic";
 
 (async () => {
   const anthropic = new Anthropic({
diff --git a/examples/anthropic/chat_interactive.ts b/examples/anthropic/chat_interactive.ts
index 3f33b268f..d4ee02fb7 100644
--- a/examples/anthropic/chat_interactive.ts
+++ b/examples/anthropic/chat_interactive.ts
@@ -1,4 +1,5 @@
-import { Anthropic, ChatMemoryBuffer, SimpleChatEngine } from "llamaindex";
+import { Anthropic } from "@llamaindex/anthropic";
+import { ChatMemoryBuffer, SimpleChatEngine } from "llamaindex";
 import { stdin as input, stdout as output } from "node:process";
 import readline from "node:readline/promises";
 
diff --git a/examples/anthropic/haiku.ts b/examples/anthropic/haiku.ts
index 1e43d23b2..459e56f25 100644
--- a/examples/anthropic/haiku.ts
+++ b/examples/anthropic/haiku.ts
@@ -1,4 +1,4 @@
-import { Anthropic } from "llamaindex";
+import { Anthropic } from "@llamaindex/anthropic";
 
 (async () => {
   const anthropic = new Anthropic({
diff --git a/examples/anthropic/prompt-caching.ts b/examples/anthropic/prompt-caching.ts
index 2f9182e9a..e7d371c50 100644
--- a/examples/anthropic/prompt-caching.ts
+++ b/examples/anthropic/prompt-caching.ts
@@ -1,4 +1,4 @@
-import { Anthropic } from "llamaindex";
+import { Anthropic } from "@llamaindex/anthropic";
 
 async function main() {
   const anthropic = new Anthropic({
diff --git a/examples/anthropic/stream.ts b/examples/anthropic/stream.ts
index a91ed0422..17b07bc55 100644
--- a/examples/anthropic/stream.ts
+++ b/examples/anthropic/stream.ts
@@ -1,4 +1,4 @@
-import { Anthropic } from "llamaindex";
+import { Anthropic } from "@llamaindex/anthropic";
 
 (async () => {
   const anthropic = new Anthropic({
diff --git a/examples/astradb/example.ts b/examples/astradb/example.ts
index e576dcacc..3b8028bb1 100644
--- a/examples/astradb/example.ts
+++ b/examples/astradb/example.ts
@@ -1,5 +1,5 @@
+import { AstraDBVectorStore } from "@llamaindex/astra";
 import {
-  AstraDBVectorStore,
   Document,
   MetadataFilters,
   storageContextFromDefaults,
diff --git a/examples/astradb/load.ts b/examples/astradb/load.ts
index 12f544754..b8e8cbfff 100644
--- a/examples/astradb/load.ts
+++ b/examples/astradb/load.ts
@@ -1,9 +1,6 @@
-import {
-  AstraDBVectorStore,
-  CSVReader,
-  storageContextFromDefaults,
-  VectorStoreIndex,
-} from "llamaindex";
+import { AstraDBVectorStore } from "@llamaindex/astra";
+import { CSVReader } from "@llamaindex/readers/csv";
+import { storageContextFromDefaults, VectorStoreIndex } from "llamaindex";
 
 const collectionName = "movie_reviews";
 
diff --git a/examples/astradb/query.ts b/examples/astradb/query.ts
index f0469cb17..412e339a6 100644
--- a/examples/astradb/query.ts
+++ b/examples/astradb/query.ts
@@ -1,8 +1,5 @@
-import {
-  AstraDBVectorStore,
-  VectorStoreIndex,
-  serviceContextFromDefaults,
-} from "llamaindex";
+import { AstraDBVectorStore } from "@llamaindex/astra";
+import { VectorStoreIndex, serviceContextFromDefaults } from "llamaindex";
 
 const collectionName = "movie_reviews";
 
diff --git a/examples/azure-cosmosdb.ts b/examples/azure-cosmosdb.ts
index fa9999dd6..0f56a79f2 100644
--- a/examples/azure-cosmosdb.ts
+++ b/examples/azure-cosmosdb.ts
@@ -8,13 +8,15 @@ import {
   AzureCosmosDBNoSqlVectorStore,
   AzureCosmosNoSqlDocumentStore,
   AzureCosmosNoSqlIndexStore,
+} from "@llamaindex/azure";
+import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
+import {
   Document,
-  OpenAI,
-  OpenAIEmbedding,
   Settings,
   storageContextFromDefaults,
   VectorStoreIndex,
 } from "llamaindex";
+
 /**
  * This example demonstrates how to use Azure CosmosDB with LlamaIndex.
  * It uses Azure CosmosDB as IndexStore, DocumentStore, and VectorStore.
diff --git a/examples/azure/azure-openai.ts b/examples/azure/azure-openai.ts
index b463f686f..ce99eb6cb 100644
--- a/examples/azure/azure-openai.ts
+++ b/examples/azure/azure-openai.ts
@@ -2,8 +2,8 @@ import {
   DefaultAzureCredential,
   getBearerTokenProvider,
 } from "@azure/identity";
+import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
 import "dotenv/config";
-import { OpenAI, OpenAIEmbedding } from "llamaindex";
 
 const AZURE_COGNITIVE_SERVICES_SCOPE =
   "https://cognitiveservices.azure.com/.default";
diff --git a/examples/azure/azure_dynamic_session.ts b/examples/azure/azure_dynamic_session.ts
index 31d5375c7..4dc84e024 100644
--- a/examples/azure/azure_dynamic_session.ts
+++ b/examples/azure/azure_dynamic_session.ts
@@ -4,7 +4,9 @@ import {
   DefaultAzureCredential,
   getBearerTokenProvider,
 } from "@azure/identity";
-import { AzureDynamicSessionTool, OpenAI, ReActAgent } from "llamaindex";
+import { AzureDynamicSessionTool } from "@llamaindex/azure";
+import { OpenAI } from "@llamaindex/openai";
+import { ReActAgent } from "llamaindex";
 
 async function main() {
   const credential = new DefaultAzureCredential();
diff --git a/examples/chatHistory.ts b/examples/chatHistory.ts
index c55c618d6..7d4b3caa3 100644
--- a/examples/chatHistory.ts
+++ b/examples/chatHistory.ts
@@ -1,9 +1,9 @@
 import { stdin as input, stdout as output } from "node:process";
 import readline from "node:readline/promises";
 
+import { OpenAI } from "@llamaindex/openai";
 import {
   ChatSummaryMemoryBuffer,
-  OpenAI,
   Settings,
   SimpleChatEngine,
 } from "llamaindex";
diff --git a/examples/chromadb/preFilters.ts b/examples/chromadb/preFilters.ts
index 5b71878b1..764880a6b 100644
--- a/examples/chromadb/preFilters.ts
+++ b/examples/chromadb/preFilters.ts
@@ -1,5 +1,5 @@
+import { ChromaVectorStore } from "@llamaindex/chroma";
 import {
-  ChromaVectorStore,
   Document,
   MetadataFilters,
   VectorStoreIndex,
diff --git a/examples/chromadb/test.ts b/examples/chromadb/test.ts
index dbdcfa367..c22466d17 100644
--- a/examples/chromadb/test.ts
+++ b/examples/chromadb/test.ts
@@ -1,9 +1,6 @@
-import {
-  ChromaVectorStore,
-  CSVReader,
-  storageContextFromDefaults,
-  VectorStoreIndex,
-} from "llamaindex";
+import { ChromaVectorStore } from "@llamaindex/chroma";
+import { CSVReader } from "@llamaindex/readers/csv";
+import { storageContextFromDefaults, VectorStoreIndex } from "llamaindex";
 
 const collectionName = "movie_reviews";
 
diff --git a/examples/cosmosdb/loadVectorData.ts b/examples/cosmosdb/loadVectorData.ts
index cfb043b15..194f0c8b1 100644
--- a/examples/cosmosdb/loadVectorData.ts
+++ b/examples/cosmosdb/loadVectorData.ts
@@ -1,14 +1,13 @@
 import { CosmosClient } from "@azure/cosmos";
 import { DefaultAzureCredential } from "@azure/identity";
+import { AzureCosmosDBNoSQLConfig } from "@llamaindex/azure";
+import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
 import {
   SimpleCosmosDBReader,
   SimpleCosmosDBReaderLoaderConfig,
 } from "@llamaindex/readers/cosmosdb";
 import * as dotenv from "dotenv";
 import {
-  AzureCosmosDBNoSQLConfig,
-  OpenAI,
-  OpenAIEmbedding,
   Settings,
   storageContextFromDefaults,
   VectorStoreIndex,
diff --git a/examples/cosmosdb/queryVectorData.ts b/examples/cosmosdb/queryVectorData.ts
index 13e960483..9d1cfbc7e 100644
--- a/examples/cosmosdb/queryVectorData.ts
+++ b/examples/cosmosdb/queryVectorData.ts
@@ -1,10 +1,9 @@
 import { CosmosClient } from "@azure/cosmos";
 import { DefaultAzureCredential } from "@azure/identity";
+import { AzureCosmosDBNoSQLConfig } from "@llamaindex/azure";
+import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
 import * as dotenv from "dotenv";
 import {
-  AzureCosmosDBNoSQLConfig,
-  OpenAI,
-  OpenAIEmbedding,
   Settings,
   storageContextFromDefaults,
   VectorStoreIndex,
diff --git a/examples/cosmosdb/utils.ts b/examples/cosmosdb/utils.ts
index 0a1328d01..2b45d7698 100644
--- a/examples/cosmosdb/utils.ts
+++ b/examples/cosmosdb/utils.ts
@@ -4,7 +4,7 @@ import {
   AzureCosmosDBNoSqlVectorStore,
   AzureCosmosNoSqlDocumentStore,
   AzureCosmosNoSqlIndexStore,
-} from "llamaindex";
+} from "@llamaindex/azure";
 
 /**
  * Util function to create AzureCosmosDB vectorStore, docStore, indexStore from connection string.
diff --git a/examples/deepinfra/chat.ts b/examples/deepinfra/chat.ts
index 88bd2b1c1..c3c31bb16 100644
--- a/examples/deepinfra/chat.ts
+++ b/examples/deepinfra/chat.ts
@@ -1,4 +1,4 @@
-import { DeepInfra } from "llamaindex";
+import { DeepInfra } from "@llamaindex/deepinfra";
 
 (async () => {
   if (!process.env.DEEPINFRA_API_TOKEN) {
diff --git a/examples/deepinfra/embedding.ts b/examples/deepinfra/embedding.ts
index adf494209..1d0dd224a 100644
--- a/examples/deepinfra/embedding.ts
+++ b/examples/deepinfra/embedding.ts
@@ -1,4 +1,4 @@
-import { DeepInfraEmbedding } from "llamaindex";
+import { DeepInfraEmbedding } from "@llamaindex/deepinfra";
 
 async function main() {
   // API token can be provided as an environment variable too
diff --git a/examples/evaluation/correctness.ts b/examples/evaluation/correctness.ts
index f9f9b7cbb..af8970830 100644
--- a/examples/evaluation/correctness.ts
+++ b/examples/evaluation/correctness.ts
@@ -1,4 +1,5 @@
-import { CorrectnessEvaluator, OpenAI, Settings } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { CorrectnessEvaluator, Settings } from "llamaindex";
 
 // Update llm to use OpenAI
 Settings.llm = new OpenAI({ model: "gpt-4" });
diff --git a/examples/evaluation/faithfulness.ts b/examples/evaluation/faithfulness.ts
index f5b48288f..d47e0cb3e 100644
--- a/examples/evaluation/faithfulness.ts
+++ b/examples/evaluation/faithfulness.ts
@@ -1,7 +1,7 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   Document,
   FaithfulnessEvaluator,
-  OpenAI,
   Settings,
   VectorStoreIndex,
 } from "llamaindex";
diff --git a/examples/evaluation/relevancy.ts b/examples/evaluation/relevancy.ts
index 91360783c..37b4fd1bc 100644
--- a/examples/evaluation/relevancy.ts
+++ b/examples/evaluation/relevancy.ts
@@ -1,6 +1,6 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   Document,
-  OpenAI,
   RelevancyEvaluator,
   Settings,
   VectorStoreIndex,
diff --git a/examples/extractors/keywordExtractor.ts b/examples/extractors/keywordExtractor.ts
index 78095cefb..0a5a1433e 100644
--- a/examples/extractors/keywordExtractor.ts
+++ b/examples/extractors/keywordExtractor.ts
@@ -1,9 +1,5 @@
-import {
-  Document,
-  KeywordExtractor,
-  OpenAI,
-  SentenceSplitter,
-} from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { Document, KeywordExtractor, SentenceSplitter } from "llamaindex";
 
 (async () => {
   const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
diff --git a/examples/extractors/questionsAnsweredExtractor.ts b/examples/extractors/questionsAnsweredExtractor.ts
index d9a4f8a1b..ee518b4c4 100644
--- a/examples/extractors/questionsAnsweredExtractor.ts
+++ b/examples/extractors/questionsAnsweredExtractor.ts
@@ -1,6 +1,6 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   Document,
-  OpenAI,
   QuestionsAnsweredExtractor,
   SentenceSplitter,
 } from "llamaindex";
diff --git a/examples/extractors/summaryExtractor.ts b/examples/extractors/summaryExtractor.ts
index f7e38c1f4..5cdb6e505 100644
--- a/examples/extractors/summaryExtractor.ts
+++ b/examples/extractors/summaryExtractor.ts
@@ -1,9 +1,5 @@
-import {
-  Document,
-  OpenAI,
-  SentenceSplitter,
-  SummaryExtractor,
-} from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { Document, SentenceSplitter, SummaryExtractor } from "llamaindex";
 
 (async () => {
   const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
diff --git a/examples/extractors/titleExtractor.ts b/examples/extractors/titleExtractor.ts
index cdd22d930..b117ad1a1 100644
--- a/examples/extractors/titleExtractor.ts
+++ b/examples/extractors/titleExtractor.ts
@@ -1,4 +1,5 @@
-import { Document, OpenAI, SentenceSplitter, TitleExtractor } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { Document, SentenceSplitter, TitleExtractor } from "llamaindex";
 
 import essay from "../essay";
 
diff --git a/examples/gemini/agent.ts b/examples/gemini/agent.ts
index 1099d89a1..fb4a292d7 100644
--- a/examples/gemini/agent.ts
+++ b/examples/gemini/agent.ts
@@ -1,10 +1,5 @@
-import {
-  FunctionTool,
-  Gemini,
-  GEMINI_MODEL,
-  LLMAgent,
-  Settings,
-} from "llamaindex";
+import { Gemini, GEMINI_MODEL } from "@llamaindex/google";
+import { FunctionTool, LLMAgent, Settings } from "llamaindex";
 
 Settings.callbackManager.on("llm-tool-call", (event) => {
   console.log(event.detail);
diff --git a/examples/gemini/chat.ts b/examples/gemini/chat.ts
index e4e304f38..1b9a8d9a5 100644
--- a/examples/gemini/chat.ts
+++ b/examples/gemini/chat.ts
@@ -1,4 +1,4 @@
-import { Gemini, GEMINI_MODEL } from "llamaindex";
+import { Gemini, GEMINI_MODEL } from "@llamaindex/google";
 
 (async () => {
   if (!process.env.GOOGLE_API_KEY) {
diff --git a/examples/gemini/chatVertex.ts b/examples/gemini/chatVertex.ts
index 47936aaf2..232ba8ef2 100644
--- a/examples/gemini/chatVertex.ts
+++ b/examples/gemini/chatVertex.ts
@@ -1,4 +1,4 @@
-import { Gemini, GEMINI_MODEL, GeminiVertexSession } from "llamaindex";
+import { Gemini, GEMINI_MODEL, GeminiVertexSession } from "@llamaindex/google";
 
 (async () => {
   const gemini = new Gemini({
diff --git a/examples/gemini/embedding.ts b/examples/gemini/embedding.ts
index 6ecc2692b..fc9049767 100644
--- a/examples/gemini/embedding.ts
+++ b/examples/gemini/embedding.ts
@@ -1,4 +1,4 @@
-import { GEMINI_EMBEDDING_MODEL, GeminiEmbedding } from "llamaindex";
+import { GEMINI_EMBEDDING_MODEL, GeminiEmbedding } from "@llamaindex/google";
 
 async function main() {
   if (!process.env.GOOGLE_API_KEY) {
diff --git a/examples/gptllama.ts b/examples/gptllama.ts
index 71ff0ecfa..66a517fcf 100644
--- a/examples/gptllama.ts
+++ b/examples/gptllama.ts
@@ -1,7 +1,9 @@
 import { stdin as input, stdout as output } from "node:process";
 import readline from "node:readline/promises";
 
-import { ChatMessage, LlamaDeuce, OpenAI } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { LlamaDeuce } from "@llamaindex/replicate";
+import { ChatMessage } from "llamaindex";
 
 (async () => {
   const gpt4 = new OpenAI({ model: "gpt-4", temperature: 0.9 });
diff --git a/examples/gptturbollama3.ts b/examples/gptturbollama3.ts
index 95209cb7c..afb44755f 100644
--- a/examples/gptturbollama3.ts
+++ b/examples/gptturbollama3.ts
@@ -1,7 +1,9 @@
 import { stdin as input, stdout as output } from "node:process";
 import readline from "node:readline/promises";
 
-import { ChatMessage, OpenAI, ReplicateLLM } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { ReplicateLLM } from "@llamaindex/replicate";
+import { ChatMessage } from "llamaindex";
 
 (async () => {
   const gpt4 = new OpenAI({ model: "gpt-4-turbo", temperature: 0.9 });
diff --git a/examples/groq.ts b/examples/groq.ts
index 1c7b4b9af..284813c6c 100644
--- a/examples/groq.ts
+++ b/examples/groq.ts
@@ -1,12 +1,8 @@
 import fs from "node:fs/promises";
 
-import {
-  Document,
-  Groq,
-  HuggingFaceEmbedding,
-  Settings,
-  VectorStoreIndex,
-} from "llamaindex";
+import { Groq } from "@llamaindex/groq";
+import { HuggingFaceEmbedding } from "@llamaindex/huggingface";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 // Update llm to use Groq
 Settings.llm = new Groq({
diff --git a/examples/huggingface/chat.ts b/examples/huggingface/chat.ts
index e256f73b1..fdea312fc 100644
--- a/examples/huggingface/chat.ts
+++ b/examples/huggingface/chat.ts
@@ -1,4 +1,4 @@
-import { HuggingFaceInferenceAPI } from "llamaindex";
+import { HuggingFaceInferenceAPI } from "@llamaindex/huggingface";
 
 (async () => {
   if (!process.env.HUGGING_FACE_TOKEN) {
diff --git a/examples/huggingface/embedding.ts b/examples/huggingface/embedding.ts
index 8297b7536..4886c9ac2 100644
--- a/examples/huggingface/embedding.ts
+++ b/examples/huggingface/embedding.ts
@@ -1,12 +1,10 @@
 import fs from "node:fs/promises";
 
 import {
-  Document,
   HuggingFaceEmbedding,
   HuggingFaceEmbeddingModelType,
-  Settings,
-  VectorStoreIndex,
-} from "llamaindex";
+} from "@llamaindex/huggingface";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 // Update embed model
 Settings.embedModel = new HuggingFaceEmbedding({
diff --git a/examples/huggingface/embeddingApi.ts b/examples/huggingface/embeddingApi.ts
index a89df2703..bc22765c9 100644
--- a/examples/huggingface/embeddingApi.ts
+++ b/examples/huggingface/embeddingApi.ts
@@ -1,12 +1,10 @@
 import fs from "node:fs/promises";
 
 import {
-  Document,
   HuggingFaceInferenceAPI,
   HuggingFaceInferenceAPIEmbedding,
-  Settings,
-  VectorStoreIndex,
-} from "llamaindex";
+} from "@llamaindex/huggingface";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 if (!process.env.HUGGING_FACE_TOKEN) {
   throw new Error("Please set the HUGGING_FACE_TOKEN environment variable.");
diff --git a/examples/huggingface/local.ts b/examples/huggingface/local.ts
index b2d3cd0d1..878239e87 100644
--- a/examples/huggingface/local.ts
+++ b/examples/huggingface/local.ts
@@ -1,4 +1,4 @@
-import { HuggingFaceLLM } from "llamaindex";
+import { HuggingFaceLLM } from "@llamaindex/huggingface";
 
 (async () => {
   const hf = new HuggingFaceLLM();
diff --git a/examples/ingestion/basicIngestion.ts b/examples/ingestion/basicIngestion.ts
index 9af5711a1..f3ae7269c 100644
--- a/examples/ingestion/basicIngestion.ts
+++ b/examples/ingestion/basicIngestion.ts
@@ -1,7 +1,7 @@
+import { OpenAIEmbedding } from "@llamaindex/openai";
 import {
   Document,
   IngestionPipeline,
-  OpenAIEmbedding,
   SentenceSplitter,
   VectorStoreIndex,
 } from "llamaindex";
diff --git a/examples/jsonExtract.ts b/examples/jsonExtract.ts
index 68af23c1d..4622177e7 100644
--- a/examples/jsonExtract.ts
+++ b/examples/jsonExtract.ts
@@ -1,4 +1,4 @@
-import { OpenAI } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
 
 // Example using OpenAI's chat API to extract JSON from a sales call transcript
 // using json_mode see https://platform.openai.com/docs/guides/text-generation/json-mode for more details
diff --git a/examples/llama3.ts b/examples/llama3.ts
index f5da9f23b..49d981be3 100644
--- a/examples/llama3.ts
+++ b/examples/llama3.ts
@@ -1,4 +1,4 @@
-import { ReplicateLLM } from "llamaindex";
+import { ReplicateLLM } from "@llamaindex/replicate";
 
 (async () => {
   const tres = new ReplicateLLM({ model: "llama-3-70b-instruct" });
diff --git a/examples/llamadeuce.ts b/examples/llamadeuce.ts
index 6f8243aa1..ae8309ca7 100644
--- a/examples/llamadeuce.ts
+++ b/examples/llamadeuce.ts
@@ -1,4 +1,4 @@
-import { DeuceChatStrategy, LlamaDeuce } from "llamaindex";
+import { DeuceChatStrategy, LlamaDeuce } from "@llamaindex/replicate";
 
 (async () => {
   const deuce = new LlamaDeuce({ chatStrategy: DeuceChatStrategy.META });
diff --git a/examples/markdown.ts b/examples/markdown.ts
index b2767713e..289828e19 100644
--- a/examples/markdown.ts
+++ b/examples/markdown.ts
@@ -1,4 +1,5 @@
-import { MarkdownReader, VectorStoreIndex } from "llamaindex";
+import { MarkdownReader } from "@llamaindex/readers/markdown";
+import { VectorStoreIndex } from "llamaindex";
 
 async function main() {
   // Load Markdown file
diff --git a/examples/metadata-filter/milvus.ts b/examples/metadata-filter/milvus.ts
index 9415bca57..e26b36629 100644
--- a/examples/metadata-filter/milvus.ts
+++ b/examples/metadata-filter/milvus.ts
@@ -1,4 +1,5 @@
-import { MilvusVectorStore, VectorStoreIndex } from "llamaindex";
+import { MilvusVectorStore } from "@llamaindex/milvus";
+import { VectorStoreIndex } from "llamaindex";
 
 const collectionName = "movie_reviews";
 
diff --git a/examples/milvus/load.ts b/examples/milvus/load.ts
index 25a5c3e64..aea34664c 100644
--- a/examples/milvus/load.ts
+++ b/examples/milvus/load.ts
@@ -1,9 +1,6 @@
-import {
-  CSVReader,
-  MilvusVectorStore,
-  storageContextFromDefaults,
-  VectorStoreIndex,
-} from "llamaindex";
+import { MilvusVectorStore } from "@llamaindex/milvus";
+import { CSVReader } from "@llamaindex/readers/csv";
+import { storageContextFromDefaults, VectorStoreIndex } from "llamaindex";
 
 const collectionName = "movie_reviews";
 
diff --git a/examples/milvus/query.ts b/examples/milvus/query.ts
index ff33fc69a..3ee8a82d9 100644
--- a/examples/milvus/query.ts
+++ b/examples/milvus/query.ts
@@ -1,4 +1,5 @@
-import { MilvusVectorStore, VectorStoreIndex } from "llamaindex";
+import { MilvusVectorStore } from "@llamaindex/milvus";
+import { VectorStoreIndex } from "llamaindex";
 
 const collectionName = "movie_reviews";
 
diff --git a/examples/mistral.ts b/examples/mistral.ts
index 67555c337..5d37a567c 100644
--- a/examples/mistral.ts
+++ b/examples/mistral.ts
@@ -1,11 +1,6 @@
+import { MistralAI, MistralAIEmbedding } from "@llamaindex/mistral";
 import * as fs from "fs/promises";
-import {
-  Document,
-  MistralAI,
-  MistralAIEmbedding,
-  Settings,
-  VectorStoreIndex,
-} from "llamaindex";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 // Update embed model
 Settings.embedModel = new MistralAIEmbedding();
diff --git a/examples/mongo.ts b/examples/mongo.ts
index 24f245ea3..4bb8aa594 100644
--- a/examples/mongo.ts
+++ b/examples/mongo.ts
@@ -1,4 +1,5 @@
-import { Document, SimpleMongoReader, VectorStoreIndex } from "llamaindex";
+import { SimpleMongoReader } from "@llamaindex/readers/mongo";
+import { Document, VectorStoreIndex } from "llamaindex";
 import { MongoClient } from "mongodb";
 
 import { stdin as input, stdout as output } from "node:process";
diff --git a/examples/mongodb/2_load_and_index.ts b/examples/mongodb/2_load_and_index.ts
index bc2380a29..7eae088f9 100644
--- a/examples/mongodb/2_load_and_index.ts
+++ b/examples/mongodb/2_load_and_index.ts
@@ -1,10 +1,7 @@
+import { MongoDBAtlasVectorSearch } from "@llamaindex/mongodb";
 import { SimpleMongoReader } from "@llamaindex/readers/mongo";
 import * as dotenv from "dotenv";
-import {
-  MongoDBAtlasVectorSearch,
-  storageContextFromDefaults,
-  VectorStoreIndex,
-} from "llamaindex";
+import { storageContextFromDefaults, VectorStoreIndex } from "llamaindex";
 import { MongoClient } from "mongodb";
 
 // Load environment variables from local .env file
diff --git a/examples/mongodb/3_query.ts b/examples/mongodb/3_query.ts
index 2e097b977..088579ca2 100644
--- a/examples/mongodb/3_query.ts
+++ b/examples/mongodb/3_query.ts
@@ -1,5 +1,6 @@
+import { MongoDBAtlasVectorSearch } from "@llamaindex/mongodb";
 import * as dotenv from "dotenv";
-import { MongoDBAtlasVectorSearch, VectorStoreIndex } from "llamaindex";
+import { VectorStoreIndex } from "llamaindex";
 import { MongoClient } from "mongodb";
 
 // Load environment variables from local .env file
diff --git a/examples/multimodal/chat.ts b/examples/multimodal/chat.ts
index 4884a11b5..4e0e4081c 100644
--- a/examples/multimodal/chat.ts
+++ b/examples/multimodal/chat.ts
@@ -1,5 +1,6 @@
 // call pnpm tsx multimodal/load.ts first to init the storage
-import { OpenAI, Settings, SimpleChatEngine, imageToDataUrl } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { Settings, SimpleChatEngine, imageToDataUrl } from "llamaindex";
 import fs from "node:fs/promises";
 
 import path from "path";
diff --git a/examples/multimodal/clip.ts b/examples/multimodal/clip.ts
index f3eaa984d..6e0607866 100644
--- a/examples/multimodal/clip.ts
+++ b/examples/multimodal/clip.ts
@@ -1,4 +1,5 @@
-import { ClipEmbedding, similarity, SimilarityType } from "llamaindex";
+import { ClipEmbedding } from "@llamaindex/clip";
+import { similarity, SimilarityType } from "llamaindex";
 
 async function main() {
   const clip = new ClipEmbedding();
diff --git a/examples/multimodal/context.ts b/examples/multimodal/context.ts
index bfe0c7daa..f7c7e12ff 100644
--- a/examples/multimodal/context.ts
+++ b/examples/multimodal/context.ts
@@ -1,10 +1,10 @@
 // call pnpm tsx multimodal/load.ts first to init the storage
-import { extractText } from "@llamaindex/core/utils";
+import { OpenAI } from "@llamaindex/openai";
 import {
   ContextChatEngine,
+  extractText,
   NodeWithScore,
   ObjectType,
-  OpenAI,
   Settings,
   VectorStoreIndex,
 } from "llamaindex";
diff --git a/examples/multimodal/rag.ts b/examples/multimodal/rag.ts
index 14d3a1c74..068121c36 100644
--- a/examples/multimodal/rag.ts
+++ b/examples/multimodal/rag.ts
@@ -1,7 +1,7 @@
-import { extractText } from "@llamaindex/core/utils";
+import { OpenAI } from "@llamaindex/openai";
 import {
+  extractText,
   getResponseSynthesizer,
-  OpenAI,
   Settings,
   VectorStoreIndex,
 } from "llamaindex";
diff --git a/examples/ollama.ts b/examples/ollama.ts
index 781c3cc08..0cc6dd3f4 100644
--- a/examples/ollama.ts
+++ b/examples/ollama.ts
@@ -1,4 +1,3 @@
-import { OllamaEmbedding } from "llamaindex";
-import { Ollama } from "llamaindex/llm/ollama";
+import { Ollama, OllamaEmbedding } from "@llamaindex/ollama";
 
 (async () => {
diff --git a/examples/openai.ts b/examples/openai.ts
index e1b36e940..20a383d8e 100644
--- a/examples/openai.ts
+++ b/examples/openai.ts
@@ -1,4 +1,4 @@
-import { OpenAI, OpenAIEmbedding } from "llamaindex";
+import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
 
 (async () => {
   const llm = new OpenAI({ model: "gpt-4-1106-preview", temperature: 0.1 });
diff --git a/examples/openai_o1.ts b/examples/openai_o1.ts
index 5bf789dd2..2ecc28a17 100644
--- a/examples/openai_o1.ts
+++ b/examples/openai_o1.ts
@@ -1,4 +1,4 @@
-import { OpenAI } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
 
 (async () => {
   const llm = new OpenAI({ model: "o1-preview", temperature: 1 });
diff --git a/examples/package.json b/examples/package.json
index 8b73a1920..1054d3c70 100644
--- a/examples/package.json
+++ b/examples/package.json
@@ -4,30 +4,50 @@
   "version": "0.1.0",
   "dependencies": {
     "@ai-sdk/openai": "^1.0.5",
-    "@aws-crypto/sha256-js": "^5.2.0",
     "@azure/cosmos": "^4.1.1",
     "@azure/identity": "^4.4.1",
     "@azure/search-documents": "^12.1.0",
-    "@datastax/astra-db-ts": "^1.4.1",
-    "@llamaindex/core": "^0.4.21",
-    "@llamaindex/readers": "^1.0.23",
     "@llamaindex/vercel": "^0.0.8",
     "@llamaindex/workflow": "^0.0.8",
     "@notionhq/client": "^2.2.15",
     "@pinecone-database/pinecone": "^4.0.0",
     "@vercel/postgres": "^0.10.0",
-    "@zilliz/milvus2-sdk-node": "^2.4.6",
     "ai": "^4.0.0",
-    "chromadb": "^1.8.1",
     "commander": "^12.1.0",
     "dotenv": "^16.4.5",
     "js-tiktoken": "^1.0.14",
     "llamaindex": "^0.8.33",
     "mongodb": "6.7.0",
-    "pathe": "^1.1.2",
     "postgres": "^3.4.4",
     "ajv": "^8.17.1",
-    "wikipedia": "^2.1.2"
+    "wikipedia": "^2.1.2",
+    "@llamaindex/openai": "workspace:*",
+    "@llamaindex/cloud": "workspace:*",
+    "@llamaindex/anthropic": "workspace:*",
+    "@llamaindex/clip": "workspace:*",
+    "@llamaindex/azure": "workspace:*",
+    "@llamaindex/deepinfra": "workspace:*",
+    "@llamaindex/groq": "workspace:*",
+    "@llamaindex/huggingface": "workspace:*",
+    "@llamaindex/node-parser": "workspace:*",
+    "@llamaindex/ollama": "workspace:*",
+    "@llamaindex/portkey-ai": "workspace:*",
+    "@llamaindex/readers": "workspace:*",
+    "@llamaindex/replicate": "workspace:*",
+    "@llamaindex/vllm": "workspace:*",
+    "@llamaindex/postgres": "workspace:*",
+    "@llamaindex/astra": "workspace:*",
+    "@llamaindex/milvus": "workspace:*",
+    "@llamaindex/chroma": "workspace:*",
+    "@llamaindex/mongodb": "workspace:*",
+    "@llamaindex/pinecone": "workspace:*",
+    "@llamaindex/qdrant": "workspace:*",
+    "@llamaindex/upstash": "workspace:*",
+    "@llamaindex/weaviate": "workspace:*",
+    "@llamaindex/google": "workspace:*",
+    "@llamaindex/mistral": "workspace:*",
+    "@llamaindex/mixedbread": "workspace:*",
+    "@llamaindex/cohere": "workspace:*"
   },
   "devDependencies": {
     "@types/node": "^22.9.0",
diff --git a/examples/pinecone-vector-store/load-docs.ts b/examples/pinecone-vector-store/load-docs.ts
index 7de41d0bf..1c6a15758 100755
--- a/examples/pinecone-vector-store/load-docs.ts
+++ b/examples/pinecone-vector-store/load-docs.ts
@@ -1,7 +1,7 @@
 // load-docs.ts
+import { PineconeVectorStore } from "@llamaindex/pinecone";
 import fs from "fs/promises";
 import {
-  PineconeVectorStore,
   SimpleDirectoryReader,
   storageContextFromDefaults,
   VectorStoreIndex,
diff --git a/examples/pinecone-vector-store/query.ts b/examples/pinecone-vector-store/query.ts
index a1ad19524..34ccf5bb1 100755
--- a/examples/pinecone-vector-store/query.ts
+++ b/examples/pinecone-vector-store/query.ts
@@ -1,4 +1,5 @@
-import { PineconeVectorStore, VectorStoreIndex } from "llamaindex";
+import { PineconeVectorStore } from "@llamaindex/pinecone";
+import { VectorStoreIndex } from "llamaindex";
 
 async function main() {
   // eslint-disable-next-line @typescript-eslint/no-require-imports
diff --git a/examples/portkey.ts b/examples/portkey.ts
index 694c02586..a75ab2dd5 100644
--- a/examples/portkey.ts
+++ b/examples/portkey.ts
@@ -1,4 +1,4 @@
-import { Portkey } from "llamaindex";
+import { Portkey } from "@llamaindex/portkey-ai";
 
 (async () => {
   const portkey = new Portkey({
diff --git a/examples/qdrantdb/preFilters.ts b/examples/qdrantdb/preFilters.ts
index bfd59da2e..53f4f03a7 100644
--- a/examples/qdrantdb/preFilters.ts
+++ b/examples/qdrantdb/preFilters.ts
@@ -1,9 +1,9 @@
+import { QdrantVectorStore } from "@llamaindex/qdrant";
 import * as dotenv from "dotenv";
 import {
   Document,
   MetadataMode,
   NodeWithScore,
-  QdrantVectorStore,
   Settings,
   VectorStoreIndex,
   storageContextFromDefaults,
diff --git a/examples/readers/package.json b/examples/readers/package.json
index 5a1693930..a8e4d0f20 100644
--- a/examples/readers/package.json
+++ b/examples/readers/package.json
@@ -20,7 +20,8 @@
   },
   "dependencies": {
     "@llamaindex/readers": "*",
-    "llamaindex": "*"
+    "llamaindex": "*",
+    "@llamaindex/cloud": "*"
   },
   "devDependencies": {
     "@types/node": "^22.9.0",
diff --git a/examples/readers/src/assemblyai.ts b/examples/readers/src/assemblyai.ts
index a15939142..70daa317f 100644
--- a/examples/readers/src/assemblyai.ts
+++ b/examples/readers/src/assemblyai.ts
@@ -1,6 +1,9 @@
-import { AudioTranscriptReader } from "@llamaindex/readers/assembly-ai";
+import {
+  AudioTranscriptReader,
+  TranscribeParams,
+} from "@llamaindex/readers/assembly-ai";
 import { program } from "commander";
-import { TranscribeParams, VectorStoreIndex } from "llamaindex";
+import { VectorStoreIndex } from "llamaindex";
 import { stdin as input, stdout as output } from "node:process";
 import { createInterface } from "node:readline/promises";
 
diff --git a/examples/readers/src/csv.ts b/examples/readers/src/csv.ts
index ac69468e3..2ced907ee 100644
--- a/examples/readers/src/csv.ts
+++ b/examples/readers/src/csv.ts
@@ -1,7 +1,7 @@
+import { OpenAI } from "@llamaindex/openai";
 import { CSVReader } from "@llamaindex/readers/csv";
 import {
   getResponseSynthesizer,
-  OpenAI,
   PromptTemplate,
   Settings,
   VectorStoreIndex,
diff --git a/examples/readers/src/llamaparse-docx.ts b/examples/readers/src/llamaparse-docx.ts
index 6e6609706..5d28d39ec 100644
--- a/examples/readers/src/llamaparse-docx.ts
+++ b/examples/readers/src/llamaparse-docx.ts
@@ -1,4 +1,4 @@
-import { Language, LlamaParseReader } from "llamaindex";
+import { Language, LlamaParseReader } from "@llamaindex/cloud";
 import fs from "node:fs";
 import path from "node:path";
 
diff --git a/examples/readers/src/llamaparse-json.ts b/examples/readers/src/llamaparse-json.ts
index c5f2fc580..05ad482cd 100644
--- a/examples/readers/src/llamaparse-json.ts
+++ b/examples/readers/src/llamaparse-json.ts
@@ -1,11 +1,11 @@
-import { createMessageContent } from "@llamaindex/core/response-synthesizers";
+import { LlamaParseReader } from "@llamaindex/cloud";
+import { OpenAI } from "@llamaindex/openai";
 import {
   Document,
   ImageNode,
-  LlamaParseReader,
-  OpenAI,
   PromptTemplate,
   VectorStoreIndex,
+  createMessageContent,
 } from "llamaindex";
 
 const reader = new LlamaParseReader();
diff --git a/examples/readers/src/llamaparse.ts b/examples/readers/src/llamaparse.ts
index 3a9e6b287..f45c2d221 100644
--- a/examples/readers/src/llamaparse.ts
+++ b/examples/readers/src/llamaparse.ts
@@ -1,4 +1,5 @@
-import { LlamaParseReader, VectorStoreIndex } from "llamaindex";
+import { LlamaParseReader } from "@llamaindex/cloud";
+import { VectorStoreIndex } from "llamaindex";
 
 async function main() {
   // Load PDF using LlamaParse
diff --git a/examples/readers/src/pdf_fw_openai.ts b/examples/readers/src/pdf_fw_openai.ts
index 1089f46af..3e33d37d3 100644
--- a/examples/readers/src/pdf_fw_openai.ts
+++ b/examples/readers/src/pdf_fw_openai.ts
@@ -1,5 +1,6 @@
+import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
 import { PDFReader } from "@llamaindex/readers/pdf";
-import { OpenAI, OpenAIEmbedding, VectorStoreIndex } from "llamaindex";
+import { VectorStoreIndex } from "llamaindex";
 
 import { Settings } from "llamaindex";
 
diff --git a/examples/readers/src/simple-directory-reader-with-llamaparse.ts b/examples/readers/src/simple-directory-reader-with-llamaparse.ts
index 5a1214a47..e71683657 100644
--- a/examples/readers/src/simple-directory-reader-with-llamaparse.ts
+++ b/examples/readers/src/simple-directory-reader-with-llamaparse.ts
@@ -1,5 +1,5 @@
-import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
-import { LlamaParseReader, VectorStoreIndex } from "llamaindex";
+import { LlamaParseReader } from "@llamaindex/cloud";
+import { SimpleDirectoryReader, VectorStoreIndex } from "llamaindex";
 
 async function main() {
   const reader = new SimpleDirectoryReader();
diff --git a/examples/readers/src/simple-directory-reader.ts b/examples/readers/src/simple-directory-reader.ts
index 23df9c82b..784b5b5ef 100644
--- a/examples/readers/src/simple-directory-reader.ts
+++ b/examples/readers/src/simple-directory-reader.ts
@@ -1,4 +1,4 @@
-import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
+import { SimpleDirectoryReader } from "llamaindex";
 // or
-// import { SimpleDirectoryReader } from 'llamaindex'
+// import { SimpleDirectoryReader } from '@llamaindex/readers/directory'
 
diff --git a/examples/readonly.ts b/examples/readonly.ts
index e0a2cfb9b..64fcdf09b 100644
--- a/examples/readonly.ts
+++ b/examples/readonly.ts
@@ -1,9 +1,6 @@
+import { PDFReader } from "@llamaindex/readers/pdf";
 import { execSync } from "child_process";
-import {
-  PDFReader,
-  VectorStoreIndex,
-  storageContextFromDefaults,
-} from "llamaindex";
+import { VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
 
 const STORAGE_DIR = "./cache";
 
diff --git a/examples/recipes/cost-analysis.ts b/examples/recipes/cost-analysis.ts
index 09da95308..2a5e3cb9e 100644
--- a/examples/recipes/cost-analysis.ts
+++ b/examples/recipes/cost-analysis.ts
@@ -1,6 +1,6 @@
-import { extractText } from "@llamaindex/core/utils";
+import { OpenAI } from "@llamaindex/openai";
 import { encodingForModel } from "js-tiktoken";
-import { ChatMessage, OpenAI } from "llamaindex";
+import { ChatMessage, extractText } from "llamaindex";
 import { Settings } from "llamaindex/Settings";
 
 const encoding = encodingForModel("gpt-4-0125-preview");
diff --git a/examples/rerankers/CohereReranker.ts b/examples/rerankers/CohereReranker.ts
index 6b4bda87b..7e1cc812e 100644
--- a/examples/rerankers/CohereReranker.ts
+++ b/examples/rerankers/CohereReranker.ts
@@ -1,10 +1,6 @@
-import {
-  CohereRerank,
-  Document,
-  OpenAI,
-  Settings,
-  VectorStoreIndex,
-} from "llamaindex";
+import { CohereRerank } from "@llamaindex/cohere";
+import { OpenAI } from "@llamaindex/openai";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 import essay from "../essay";
 
diff --git a/examples/routerQueryEngine.ts b/examples/routerQueryEngine.ts
index b1081d5c3..51f5025bd 100644
--- a/examples/routerQueryEngine.ts
+++ b/examples/routerQueryEngine.ts
@@ -1,5 +1,5 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
-  OpenAI,
   RouterQueryEngine,
   SentenceSplitter,
   Settings,
diff --git a/examples/sentenceWindow.ts b/examples/sentenceWindow.ts
index cd470db34..c8dfe7ce6 100644
--- a/examples/sentenceWindow.ts
+++ b/examples/sentenceWindow.ts
@@ -1,6 +1,6 @@
+import { HuggingFaceEmbedding } from "@llamaindex/huggingface";
 import {
   Document,
-  HuggingFaceEmbedding,
   MetadataReplacementPostProcessor,
   SentenceWindowNodeParser,
   Settings,
diff --git a/examples/toolsStream.ts b/examples/toolsStream.ts
index 7108ddd06..55e6ca250 100644
--- a/examples/toolsStream.ts
+++ b/examples/toolsStream.ts
@@ -1,4 +1,4 @@
-import { OpenAI } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
 
 async function main() {
   const llm = new OpenAI({ model: "gpt-4-turbo" });
diff --git a/examples/vector-store/azure/index.ts b/examples/vector-store/azure/index.ts
index ddaeadcf0..2a3f1e57b 100644
--- a/examples/vector-store/azure/index.ts
+++ b/examples/vector-store/azure/index.ts
@@ -7,19 +7,20 @@ import {
   KnownAnalyzerNames,
   KnownVectorSearchAlgorithmKind,
 } from "@azure/search-documents";
-import dotenv from "dotenv";
 import {
   AzureAISearchVectorStore,
-  Document,
   FilterableMetadataFieldKeysType,
+  IndexManagement,
+  MetadataIndexFieldType,
+} from "@llamaindex/azure";
+import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
+import dotenv from "dotenv";
+import {
+  Document,
   FilterCondition,
   FilterOperator,
-  IndexManagement,
   Metadata,
-  MetadataIndexFieldType,
   NodeWithScore,
-  OpenAI,
-  OpenAIEmbedding,
   Settings,
   SimpleDirectoryReader,
   storageContextFromDefaults,
diff --git a/examples/vector-store/pg/load-docs.ts b/examples/vector-store/pg/load-docs.ts
index 1932a472c..ac3787715 100755
--- a/examples/vector-store/pg/load-docs.ts
+++ b/examples/vector-store/pg/load-docs.ts
@@ -1,10 +1,10 @@
 // load-docs.ts
+import { PGVectorStore } from "@llamaindex/postgres";
 import {
   SimpleDirectoryReader,
   storageContextFromDefaults,
   VectorStoreIndex,
 } from "llamaindex";
-import { PGVectorStore } from "llamaindex/vector-store/PGVectorStore";
 import fs from "node:fs/promises";
 
 async function getSourceFilenames(sourceDir: string) {
diff --git a/examples/vector-store/pg/supabase.ts b/examples/vector-store/pg/supabase.ts
index 1cbbb5354..db157b862 100644
--- a/examples/vector-store/pg/supabase.ts
+++ b/examples/vector-store/pg/supabase.ts
@@ -1,3 +1,4 @@
+import { PGVectorStore } from "@llamaindex/postgres";
 import dotenv from "dotenv";
 import {
   SimpleDirectoryReader,
@@ -5,8 +6,6 @@ import {
   VectorStoreIndex,
 } from "llamaindex";
 
-import { PGVectorStore } from "llamaindex/vector-store/PGVectorStore";
-
 dotenv.config();
 
 // Get direct connection string from Supabase and set it as POSTGRES_URL environment variable
diff --git a/examples/vectorIndexAnthropic.ts b/examples/vectorIndexAnthropic.ts
index 6ebef441f..2a3e03d0a 100644
--- a/examples/vectorIndexAnthropic.ts
+++ b/examples/vectorIndexAnthropic.ts
@@ -1,7 +1,7 @@
 import fs from "node:fs/promises";
 
+import { Anthropic } from "@llamaindex/anthropic";
 import {
-  Anthropic,
   Document,
   Settings,
   VectorStoreIndex,
diff --git a/examples/vectorIndexCustomize.ts b/examples/vectorIndexCustomize.ts
index 437a5415b..337b6e634 100644
--- a/examples/vectorIndexCustomize.ts
+++ b/examples/vectorIndexCustomize.ts
@@ -1,6 +1,6 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   Document,
-  OpenAI,
   RetrieverQueryEngine,
   Settings,
   SimilarityPostprocessor,
diff --git a/examples/vectorIndexEmbed3.ts b/examples/vectorIndexEmbed3.ts
index da0507682..57c0371e2 100644
--- a/examples/vectorIndexEmbed3.ts
+++ b/examples/vectorIndexEmbed3.ts
@@ -1,11 +1,7 @@
 import fs from "node:fs/promises";
 
-import {
-  Document,
-  OpenAIEmbedding,
-  Settings,
-  VectorStoreIndex,
-} from "llamaindex";
+import { OpenAIEmbedding } from "@llamaindex/openai";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 // Update embed model
 Settings.embedModel = new OpenAIEmbedding({
diff --git a/examples/vectorIndexFromVectorStore.ts b/examples/vectorIndexFromVectorStore.ts
index d89a9552b..5526f7392 100644
--- a/examples/vectorIndexFromVectorStore.ts
+++ b/examples/vectorIndexFromVectorStore.ts
@@ -1,8 +1,8 @@
+import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
+import { Index, Pinecone, RecordMetadata } from "@pinecone-database/pinecone";
 import {
   BaseVectorStore,
   getResponseSynthesizer,
-  OpenAI,
-  OpenAIEmbedding,
   RetrieverQueryEngine,
   Settings,
   TextNode,
@@ -12,8 +12,6 @@ import {
   VectorStoreQueryResult,
 } from "llamaindex";
 
-import { Index, Pinecone, RecordMetadata } from "@pinecone-database/pinecone";
-
 // Update llm
 Settings.llm = new OpenAI({
   model: "gpt-4",
diff --git a/examples/vectorIndexGPT4.ts b/examples/vectorIndexGPT4.ts
index dab35809e..6134da736 100644
--- a/examples/vectorIndexGPT4.ts
+++ b/examples/vectorIndexGPT4.ts
@@ -1,6 +1,7 @@
 import fs from "node:fs/promises";
 
-import { Document, OpenAI, Settings, VectorStoreIndex } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 Settings.llm = new OpenAI({ model: "gpt-4" });
 
diff --git a/examples/vectorIndexLocal.ts b/examples/vectorIndexLocal.ts
index 9d19e68ce..f2b67b0f1 100644
--- a/examples/vectorIndexLocal.ts
+++ b/examples/vectorIndexLocal.ts
@@ -1,12 +1,8 @@
 import fs from "node:fs/promises";
 
-import {
-  Document,
-  HuggingFaceEmbedding,
-  Ollama,
-  Settings,
-  VectorStoreIndex,
-} from "llamaindex";
+import { HuggingFaceEmbedding } from "@llamaindex/huggingface";
+import { Ollama } from "@llamaindex/ollama";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 Settings.llm = new Ollama({
   model: "mixtral:8x7b",
diff --git a/examples/vision.ts b/examples/vision.ts
index 6ee5d33d5..f4838b366 100644
--- a/examples/vision.ts
+++ b/examples/vision.ts
@@ -1,4 +1,4 @@
-import { OpenAI } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
 
 (async () => {
   const llm = new OpenAI({ model: "gpt-4-turbo", temperature: 0.1 });
diff --git a/examples/vllm.ts b/examples/vllm.ts
index f65cd375e..c384fb914 100644
--- a/examples/vllm.ts
+++ b/examples/vllm.ts
@@ -1,4 +1,4 @@
-import { VLLM } from "llamaindex";
+import { VLLM } from "@llamaindex/vllm";
 
 const llm = new VLLM({
   model: "NousResearch/Meta-Llama-3-8B-Instruct",
diff --git a/examples/weaviate/load.ts b/examples/weaviate/load.ts
index a1bf38187..a1d0e0032 100644
--- a/examples/weaviate/load.ts
+++ b/examples/weaviate/load.ts
@@ -1,9 +1,6 @@
-import {
-  CSVReader,
-  storageContextFromDefaults,
-  VectorStoreIndex,
-  WeaviateVectorStore,
-} from "llamaindex";
+import { CSVReader } from "@llamaindex/readers/csv";
+import { WeaviateVectorStore } from "@llamaindex/weaviate";
+import { storageContextFromDefaults, VectorStoreIndex } from "llamaindex";
 
 const indexName = "MovieReviews";
 
diff --git a/examples/weaviate/query.ts b/examples/weaviate/query.ts
index ad265dece..6c83f74d6 100644
--- a/examples/weaviate/query.ts
+++ b/examples/weaviate/query.ts
@@ -1,4 +1,5 @@
-import { VectorStoreIndex, WeaviateVectorStore } from "llamaindex";
+import { WeaviateVectorStore } from "@llamaindex/weaviate";
+import { VectorStoreIndex } from "llamaindex";
 
 const indexName = "MovieReviews";
 
diff --git a/examples/wiki.ts b/examples/wiki.ts
index 8ff6efda0..4a829fcb5 100644
--- a/examples/wiki.ts
+++ b/examples/wiki.ts
@@ -1,7 +1,7 @@
 /** Example of a tool that uses Wikipedia */
 
-import type { BaseTool, ToolMetadata } from "@llamaindex/core/llms";
 import type { JSONSchemaType } from "ajv";
+import type { BaseTool, ToolMetadata } from "llamaindex";
 import { default as wiki } from "wikipedia";
 
 type WikipediaParameter = {
diff --git a/examples/workflow/app-creator.ts b/examples/workflow/app-creator.ts
index 8a6448cb1..c1b05659e 100644
--- a/examples/workflow/app-creator.ts
+++ b/examples/workflow/app-creator.ts
@@ -1,3 +1,4 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   HandlerContext,
   StartEvent,
@@ -5,7 +6,6 @@ import {
   Workflow,
   WorkflowEvent,
 } from "@llamaindex/workflow";
-import { OpenAI } from "llamaindex";
 
 const MAX_REVIEWS = 3;
 
diff --git a/examples/workflow/conditional.ts b/examples/workflow/conditional.ts
index c8b6cf040..6a2e38406 100644
--- a/examples/workflow/conditional.ts
+++ b/examples/workflow/conditional.ts
@@ -1,3 +1,4 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   HandlerContext,
   StartEvent,
@@ -5,7 +6,6 @@ import {
   Workflow,
   WorkflowEvent,
 } from "@llamaindex/workflow";
-import { OpenAI } from "llamaindex";
 
 // Create LLM instance
 const llm = new OpenAI();
diff --git a/examples/workflow/joke.ts b/examples/workflow/joke.ts
index 310761d77..b13e4dd18 100644
--- a/examples/workflow/joke.ts
+++ b/examples/workflow/joke.ts
@@ -1,10 +1,10 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   StartEvent,
   StopEvent,
   Workflow,
   WorkflowEvent,
 } from "@llamaindex/workflow";
-import { OpenAI } from "llamaindex";
 
 // Create LLM instance
 const llm = new OpenAI();
diff --git a/examples/workflow/stream-events.ts b/examples/workflow/stream-events.ts
index 2fc1f107d..8d337b354 100644
--- a/examples/workflow/stream-events.ts
+++ b/examples/workflow/stream-events.ts
@@ -1,3 +1,4 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   HandlerContext,
   StartEvent,
@@ -5,7 +6,6 @@ import {
   Workflow,
   WorkflowEvent,
 } from "@llamaindex/workflow";
-import { OpenAI } from "llamaindex";
 
 // Create LLM instance
 const llm = new OpenAI();
diff --git a/examples/workflow/validation.ts b/examples/workflow/validation.ts
index 7be348cfc..7bfacf844 100644
--- a/examples/workflow/validation.ts
+++ b/examples/workflow/validation.ts
@@ -1,10 +1,10 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   StartEvent,
   StopEvent,
   Workflow,
   WorkflowEvent,
 } from "@llamaindex/workflow";
-import { OpenAI } from "llamaindex";
 
 // Create LLM instance
 const llm = new OpenAI();
diff --git a/packages/cloud/package.json b/packages/cloud/package.json
index c472ee441..0f56f196f 100644
--- a/packages/cloud/package.json
+++ b/packages/cloud/package.json
@@ -42,6 +42,20 @@
         "types": "./reader/dist/index.d.ts",
         "default": "./reader/dist/index.js"
       }
+    },
+    ".": {
+      "require": {
+        "types": "./reader/dist/index.d.cts",
+        "default": "./reader/dist/index.cjs"
+      },
+      "import": {
+        "types": "./reader/dist/index.d.ts",
+        "default": "./reader/dist/index.js"
+      },
+      "default": {
+        "types": "./reader/dist/index.d.ts",
+        "default": "./reader/dist/index.js"
+      }
     }
   },
   "repository": {
diff --git a/packages/llamaindex/src/index.edge.ts b/packages/llamaindex/src/index.edge.ts
index 302c1fb91..bd0b3cf4e 100644
--- a/packages/llamaindex/src/index.edge.ts
+++ b/packages/llamaindex/src/index.edge.ts
@@ -65,6 +65,7 @@ export * from "@llamaindex/core/storage/chat-store";
 export * from "@llamaindex/core/storage/doc-store";
 export * from "@llamaindex/core/storage/index-store";
 export * from "@llamaindex/core/storage/kv-store";
+export * from "@llamaindex/core/utils";
 export * from "./agent/index.js";
 export * from "./cloud/index.js";
 export * from "./embeddings/index.js";
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 96dff2a4c..35f006c8c 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -538,9 +538,6 @@ importers:
       '@ai-sdk/openai':
         specifier: ^1.0.5
         version: 1.0.5(zod@3.24.1)
-      '@aws-crypto/sha256-js':
-        specifier: ^5.2.0
-        version: 5.2.0
       '@azure/cosmos':
         specifier: ^4.1.1
         version: 4.1.1
@@ -550,18 +547,90 @@ importers:
       '@azure/search-documents':
         specifier: ^12.1.0
         version: 12.1.0
-      '@datastax/astra-db-ts':
-        specifier: ^1.4.1
-        version: 1.4.1
-      '@llamaindex/core':
-        specifier: ^0.4.21
-        version: link:../packages/core
+      '@llamaindex/anthropic':
+        specifier: workspace:*
+        version: link:../packages/providers/anthropic
+      '@llamaindex/astra':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/astra
+      '@llamaindex/azure':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/azure
+      '@llamaindex/chroma':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/chroma
+      '@llamaindex/clip':
+        specifier: workspace:*
+        version: link:../packages/providers/clip
+      '@llamaindex/cloud':
+        specifier: workspace:*
+        version: link:../packages/cloud
+      '@llamaindex/cohere':
+        specifier: workspace:*
+        version: link:../packages/providers/cohere
+      '@llamaindex/deepinfra':
+        specifier: workspace:*
+        version: link:../packages/providers/deepinfra
+      '@llamaindex/google':
+        specifier: workspace:*
+        version: link:../packages/providers/google
+      '@llamaindex/groq':
+        specifier: workspace:*
+        version: link:../packages/providers/groq
+      '@llamaindex/huggingface':
+        specifier: workspace:*
+        version: link:../packages/providers/huggingface
+      '@llamaindex/milvus':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/milvus
+      '@llamaindex/mistral':
+        specifier: workspace:*
+        version: link:../packages/providers/mistral
+      '@llamaindex/mixedbread':
+        specifier: workspace:*
+        version: link:../packages/providers/mixedbread
+      '@llamaindex/mongodb':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/mongodb
+      '@llamaindex/node-parser':
+        specifier: workspace:*
+        version: link:../packages/node-parser
+      '@llamaindex/ollama':
+        specifier: workspace:*
+        version: link:../packages/providers/ollama
+      '@llamaindex/openai':
+        specifier: workspace:*
+        version: link:../packages/providers/openai
+      '@llamaindex/pinecone':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/pinecone
+      '@llamaindex/portkey-ai':
+        specifier: workspace:*
+        version: link:../packages/providers/portkey-ai
+      '@llamaindex/postgres':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/postgres
+      '@llamaindex/qdrant':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/qdrant
       '@llamaindex/readers':
-        specifier: ^1.0.23
+        specifier: workspace:*
         version: link:../packages/readers
+      '@llamaindex/replicate':
+        specifier: workspace:*
+        version: link:../packages/providers/replicate
+      '@llamaindex/upstash':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/upstash
       '@llamaindex/vercel':
         specifier: ^0.0.8
         version: link:../packages/providers/vercel
+      '@llamaindex/vllm':
+        specifier: workspace:*
+        version: link:../packages/providers/vllm
+      '@llamaindex/weaviate':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/weaviate
       '@llamaindex/workflow':
         specifier: ^0.0.8
         version: link:../packages/workflow
@@ -574,18 +643,12 @@ importers:
       '@vercel/postgres':
         specifier: ^0.10.0
         version: 0.10.0
-      '@zilliz/milvus2-sdk-node':
-        specifier: ^2.4.6
-        version: 2.4.6
       ai:
         specifier: ^4.0.0
         version: 4.0.0(react@19.0.0-rc-5c56b873-20241107)(zod@3.24.1)
       ajv:
         specifier: ^8.17.1
         version: 8.17.1
-      chromadb:
-        specifier: ^1.8.1
-        version: 1.9.2(cohere-ai@7.14.0(@aws-sdk/client-sso-oidc@3.714.0(@aws-sdk/client-sts@3.714.0))(encoding@0.1.13))(encoding@0.1.13)(openai@4.73.1(encoding@0.1.13)(zod@3.24.1))
       commander:
         specifier: ^12.1.0
         version: 12.1.0
@@ -601,9 +664,6 @@ importers:
       mongodb:
         specifier: 6.7.0
         version: 6.7.0(@aws-sdk/credential-providers@3.714.0(@aws-sdk/client-sso-oidc@3.714.0(@aws-sdk/client-sts@3.714.0)))
-      pathe:
-        specifier: ^1.1.2
-        version: 1.1.2
       postgres:
         specifier: ^3.4.4
         version: 3.4.4
@@ -623,6 +683,9 @@ importers:
 
   examples/readers:
     dependencies:
+      '@llamaindex/cloud':
+        specifier: '*'
+        version: link:../../packages/cloud
       '@llamaindex/readers':
         specifier: '*'
         version: link:../../packages/readers
@@ -6142,21 +6205,6 @@ packages:
       voyageai:
         optional: true
 
-  chromadb@1.9.2:
-    resolution: {integrity: sha512-JNeLKlrsPxld7oPJCNeF73yHyyYeyP950enWRkTa6WsJ6UohH2NQ1vXZu6lWO9WuA9EMypITyZFZ8KtcTV3y2Q==}
-    engines: {node: '>=14.17.0'}
-    peerDependencies:
-      '@google/generative-ai': ^0.1.1
-      cohere-ai: ^5.0.0 || ^6.0.0 || ^7.0.0
-      openai: ^3.0.0 || ^4.0.0
-    peerDependenciesMeta:
-      '@google/generative-ai':
-        optional: true
-      cohere-ai:
-        optional: true
-      openai:
-        optional: true
-
   chrome-trace-event@1.0.3:
     resolution: {integrity: sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==}
     engines: {node: '>=6.0'}
@@ -17330,16 +17378,6 @@ snapshots:
     transitivePeerDependencies:
       - encoding
 
-  chromadb@1.9.2(cohere-ai@7.14.0(@aws-sdk/client-sso-oidc@3.714.0(@aws-sdk/client-sts@3.714.0))(encoding@0.1.13))(encoding@0.1.13)(openai@4.73.1(encoding@0.1.13)(zod@3.24.1)):
-    dependencies:
-      cliui: 8.0.1
-      isomorphic-fetch: 3.0.0(encoding@0.1.13)
-    optionalDependencies:
-      cohere-ai: 7.14.0(@aws-sdk/client-sso-oidc@3.714.0(@aws-sdk/client-sts@3.714.0))(encoding@0.1.13)
-      openai: 4.73.1(encoding@0.1.13)(zod@3.24.1)
-    transitivePeerDependencies:
-      - encoding
-
   chrome-trace-event@1.0.3: {}
 
   ci-info@3.8.0: {}