diff --git a/apps/next/src/content/docs/llamaindex/examples/agent.mdx b/apps/next/src/content/docs/llamaindex/examples/agent.mdx
deleted file mode 100644
index 84b4fb29f39bf2e4c823ca20abcfd5c95253e519..0000000000000000000000000000000000000000
--- a/apps/next/src/content/docs/llamaindex/examples/agent.mdx
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Agents
----
-
-A built-in agent that can take decisions and reasoning based on the tools provided to it.
-
-## OpenAI Agent
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/agent/openai";
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx b/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx
deleted file mode 100644
index e9caf43ac15459bbef21b185149b5c1b3dda3ea9..0000000000000000000000000000000000000000
--- a/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx
+++ /dev/null
@@ -1,28 +0,0 @@
----
-title: Gemini Agent
----
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSourceGemini from "!raw-loader!../../../../../../../examples/gemini/agent.ts";
-
-## Installation
-
-import { Tab, Tabs } from "fumadocs-ui/components/tabs";
-
-<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
-	```shell tab="npm"
-	npm install llamaindex @llamaindex/google
-	```
-
-	```shell tab="yarn"
-	yarn add llamaindex @llamaindex/google
-	```
-
-	```shell tab="pnpm"
-	pnpm add llamaindex @llamaindex/google
-	```
-</Tabs>
-
-## Source 
-
-<DynamicCodeBlock lang="ts" code={CodeSourceGemini} />
diff --git a/apps/next/src/content/docs/llamaindex/examples/chat_engine.mdx b/apps/next/src/content/docs/llamaindex/examples/chat_engine.mdx
deleted file mode 100644
index ac4528951a63ee962c461790c29b8681b685d72f..0000000000000000000000000000000000000000
--- a/apps/next/src/content/docs/llamaindex/examples/chat_engine.mdx
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Chat Engine
----
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/chatEngine";
-
-Chat Engine is a class that allows you to create a chatbot from a retriever. It is a wrapper around a retriever that allows you to chat with it in a conversational manner.
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx b/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx
deleted file mode 100644
index d2bc34463a5ccddf447d56035966b2b744aa759d..0000000000000000000000000000000000000000
--- a/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx
+++ /dev/null
@@ -1,59 +0,0 @@
----
-title: Context-Aware Agent
----
-
-The Context-Aware Agent enhances the capabilities of standard LLM agents by incorporating relevant context from a retriever for each query. This allows the agent to provide more informed and specific responses based on the available information.
-
-## Usage
-
-Here's a simple example of how to use the Context-Aware Agent:
-
-```typescript
-import {
-  Document,
-  VectorStoreIndex,
-} from "llamaindex";
-import { OpenAI, OpenAIContextAwareAgent } from "@llamaindex/openai";
-
-async function createContextAwareAgent() {
-  // Create and index some documents
-  const documents = [
-    new Document({
-      text: "LlamaIndex is a data framework for LLM applications.",
-      id_: "doc1",
-    }),
-    new Document({
-      text: "The Eiffel Tower is located in Paris, France.",
-      id_: "doc2",
-    }),
-  ];
-
-  const index = await VectorStoreIndex.fromDocuments(documents);
-  const retriever = index.asRetriever({ similarityTopK: 1 });
-
-  // Create the Context-Aware Agent
-  const agent = new OpenAIContextAwareAgent({
-    llm: new OpenAI({ model: "gpt-3.5-turbo" }),
-    contextRetriever: retriever,
-  });
-
-  // Use the agent to answer queries
-  const response = await agent.chat({
-    message: "What is LlamaIndex used for?",
-  });
-
-  console.log("Agent Response:", response.response);
-}
-
-createContextAwareAgent().catch(console.error);
-```
-
-In this example, the Context-Aware Agent uses the retriever to fetch relevant context for each query, allowing it to provide more accurate and informed responses based on the indexed documents.
-
-## Key Components
-
-- `contextRetriever`: A retriever (e.g., from a VectorStoreIndex) that fetches relevant documents or passages for each query.
-
-## Available Context-Aware Agents
-
-- `OpenAIContextAwareAgent`: A context-aware agent using OpenAI's models.
diff --git a/apps/next/src/content/docs/llamaindex/examples/meta.json b/apps/next/src/content/docs/llamaindex/examples/meta.json
deleted file mode 100644
index f432a3b772cf27bdfa637131826afd9fc0c360c4..0000000000000000000000000000000000000000
--- a/apps/next/src/content/docs/llamaindex/examples/meta.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "title": "Examples",
-  "pages": [
-    "more_examples",
-    "chat_engine",
-    "vector_index",
-    "summary_index",
-    "save_load_index",
-    "context_aware_agent",
-    "agent",
-    "agent_gemini",
-    "local_llm",
-    "other_llms"
-  ]
-}
diff --git a/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx b/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx
deleted file mode 100644
index f2f4e4ae5a9b318993e4db06f1514e40cd45ee3a..0000000000000000000000000000000000000000
--- a/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx
+++ /dev/null
@@ -1,66 +0,0 @@
----
-title: Using other LLM APIs
----
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/mistral";
-
-By default LlamaIndex.TS uses OpenAI's LLMs and embedding models, but we support [lots of other LLMs](../modules/llms) including models from Mistral (Mistral, Mixtral), Anthropic (Claude) and Google (Gemini).
-
-If you don't want to use an API at all you can [run a local model](./local_llm).
-
-This example runs you through the process of setting up a Mistral model:
-
-
-## Installation
-
-import { Tab, Tabs } from "fumadocs-ui/components/tabs";
-
-<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
-	```shell tab="npm"
-	npm install llamaindex @llamaindex/mistral
-	```
-
-	```shell tab="yarn"
-	yarn add llamaindex @llamaindex/mistral
-	```
-
-	```shell tab="pnpm"
-	pnpm add llamaindex @llamaindex/mistral
-	```
-</Tabs>
-
-## Using another LLM
-
-You can specify what LLM LlamaIndex.TS will use on the `Settings` object, like this:
-
-```typescript
-import { MistralAI } from "@llamaindex/mistral";
-import { Settings } from "llamaindex";
-
-Settings.llm = new MistralAI({
-  model: "mistral-tiny",
-  apiKey: "<YOUR_API_KEY>",
-});
-```
-
-You can see examples of other APIs we support by checking out "Available LLMs" in the sidebar of our [LLMs section](../modules/llms).
-
-## Using another embedding model
-
-A frequent gotcha when trying to use a different API as your LLM is that LlamaIndex will also by default index and embed your data using OpenAI's embeddings. To completely switch away from OpenAI you will need to set your embedding model as well, for example:
-
-```typescript
-import { MistralAIEmbedding } from "@llamaindex/mistral";
-import { Settings } from "llamaindex";
-
-Settings.embedModel = new MistralAIEmbedding();
-```
-
-We support [many different embeddings](../modules/embeddings).
-
-## Full example
-
-This example uses Mistral's `mistral-tiny` model as the LLM and Mistral for embeddings as well.
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/examples/save_load_index.mdx b/apps/next/src/content/docs/llamaindex/examples/save_load_index.mdx
deleted file mode 100644
index bce10b9db6cd077c0232f0acdca1af86ac3dd3ec..0000000000000000000000000000000000000000
--- a/apps/next/src/content/docs/llamaindex/examples/save_load_index.mdx
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: Save/Load an Index
----
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/storageContext";
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/examples/summary_index.mdx b/apps/next/src/content/docs/llamaindex/examples/summary_index.mdx
deleted file mode 100644
index 344ce6fe842ece661a3f3ea762bc07e94d04d961..0000000000000000000000000000000000000000
--- a/apps/next/src/content/docs/llamaindex/examples/summary_index.mdx
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: Summary Index
----
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/summaryIndex";
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/examples/vector_index.mdx b/apps/next/src/content/docs/llamaindex/examples/vector_index.mdx
deleted file mode 100644
index 03c16fd9567036113a9403b13264fa6bd62be8d2..0000000000000000000000000000000000000000
--- a/apps/next/src/content/docs/llamaindex/examples/vector_index.mdx
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: Vector Index
----
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/vectorIndex";
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/chatbot.mdx b/apps/next/src/content/docs/llamaindex/getting_started/create_llama.mdx
similarity index 73%
rename from apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/chatbot.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/create_llama.mdx
index 867211893335ca680ea747ce8647983a5b4c2024..99260dc66b2ee7f0a737943623abc9cc329676f5 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/chatbot.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/create_llama.mdx
@@ -1,11 +1,7 @@
 ---
-title: Chatbot tutorial
+title: Create-Llama
 ---
 
-Once you've mastered basic [retrieval-augment generation](retrieval_augmented_generation) you may want to create an interface to chat with your data. You can do this step-by-step, but we recommend getting started quickly using `create-llama`.
-
-## Using create-llama
-
 `create-llama` is a powerful but easy to use command-line tool that generates a working, full-stack web application that allows you to chat with your data. You can learn more about it on [the `create-llama` README page](https://www.npmjs.com/package/create-llama).
 
 Run it once and it will ask you a series of questions about the kind of application you want to generate. Then you can customize your application to suit your use-case. To get started, run:
diff --git a/apps/next/src/content/docs/llamaindex/examples/more_examples.mdx b/apps/next/src/content/docs/llamaindex/getting_started/examples.mdx
similarity index 63%
rename from apps/next/src/content/docs/llamaindex/examples/more_examples.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/examples.mdx
index ef7a02754d50ee1fc02038e5ebf0aacc732a0b60..31db581bc905269f3ca8ef97f14e45bce53828f0 100644
--- a/apps/next/src/content/docs/llamaindex/examples/more_examples.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/examples.mdx
@@ -1,10 +1,10 @@
 ---
-title: See all examples
+title: Code examples
 ---
 
 Our GitHub repository has a wealth of examples to explore and try out. You can check out our [examples folder](https://github.com/run-llama/LlamaIndexTS/tree/main/examples) to see them all at once, or browse the pages in this section for some selected highlights.
 
-## Check out all examples
+## Use examples locally
 
 It may be useful to check out all the examples at once so you can try them out locally. To do this into a folder called `my-new-project`, run these commands:
 
@@ -19,3 +19,14 @@ Then you can run any example in the folder with `tsx`, e.g.:
 ```bash npm2yarn
 npx tsx ./vectorIndex.ts
 ```
+
+## Try examples online
+
+You can also try the examples online using StackBlitz:
+
+<iframe
+  className="w-full h-[440px]"
+  aria-label="LlamaIndex.TS Examples"
+  aria-description="This is a list of examples for LlamaIndex.TS."
+  src="https://stackblitz.com/github/run-llama/LlamaIndexTS/tree/main/examples?file=README.md"
+/>
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/cloudflare.mdx b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/cloudflare.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/getting_started/setup/cloudflare.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/frameworks/cloudflare.mdx
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/index.mdx b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/index.mdx
similarity index 97%
rename from apps/next/src/content/docs/llamaindex/getting_started/setup/index.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/frameworks/index.mdx
index aa4a01253006240ec29d30786ccf976b9cb39707..db7836ab0da004c7d514d391f574ff2e3fd93d9c 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/setup/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/index.mdx
@@ -1,5 +1,5 @@
 ---
-title: Choose Framework
+title: Frameworks
 description: We support multiple JS runtime and frameworks, bundlers.
 ---
 import {
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/frameworks/meta.json b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/meta.json
new file mode 100644
index 0000000000000000000000000000000000000000..0b6f3902efd57774f27ee28d40468c30998301a3
--- /dev/null
+++ b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/meta.json
@@ -0,0 +1,6 @@
+{
+  "title": "Frameworks",
+  "description": "The setup guide",
+  "defaultOpen": true,
+  "pages": ["index", "node", "typescript", "next", "vite", "cloudflare"]
+}
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/next.mdx b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/next.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/getting_started/setup/next.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/frameworks/next.mdx
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/node.mdx b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/node.mdx
similarity index 97%
rename from apps/next/src/content/docs/llamaindex/getting_started/setup/node.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/frameworks/node.mdx
index de48fa881d62d64ee03ba1a6abcbd6934cb1504f..d9d56c2433abd8839fda1e42f523aa12f946cd4b 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/setup/node.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/node.mdx
@@ -42,7 +42,7 @@ By the default, we are using `js-tiktoken` for tokenization. You can install `gp
 	```
 </Tabs>
 
-> Note: This only works for Node.js
+**Note**: This only works for Node.js
 
 ## TypeScript support
 
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/typescript.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/frameworks/typescript.mdx
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/vite.mdx b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/vite.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/getting_started/setup/vite.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/frameworks/vite.mdx
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/images/create_llama.png b/apps/next/src/content/docs/llamaindex/getting_started/images/create_llama.png
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/images/create_llama.png
rename to apps/next/src/content/docs/llamaindex/getting_started/images/create_llama.png
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/index.mdx b/apps/next/src/content/docs/llamaindex/getting_started/index.mdx
index 27cf410524b6ddb3e2cbe94dd01873dd7145d48c..184abdd1e5decfe9463da06a0ddd7ff0cf2024bf 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/index.mdx
@@ -37,20 +37,20 @@ In most cases, you'll also need an LLM package to use LlamaIndex. For example, t
 	```
 </Tabs>
 
-Go to [Using other LLM APIs](/docs/llamaindex/examples/other_llms) to find out how to use other LLMs.
+Go to [LLM APIs](/docs/llamaindex/modules/llms) to find out how to use other LLMs.
 
 
 ## What's next?
 
 <Cards>
 	<Card
-		title="I want to try LlamaIndex.TS"
-		description="Learn how to use LlamaIndex.TS with different JS runtime and frameworks."
-		href="/docs/llamaindex/getting_started/setup"
+		title="Learn LlamaIndex.TS"
+		description="Learn how to use LlamaIndex.TS by starting with one of our tutorials."
+		href="/docs/llamaindex/tutorials/rag"
 	/>
 	<Card
 		title="Show me code examples"
 		description="Explore code examples using LlamaIndex.TS."
-		href="https://stackblitz.com/github/run-llama/LlamaIndexTS/tree/main/examples?file=README.md"
+		href="/docs/llamaindex/getting_started/examples"
 	/>
 </Cards>
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/meta.json b/apps/next/src/content/docs/llamaindex/getting_started/meta.json
index ed2c8903e121667e73925f79814840e9dac3f36d..62446394b17686b179f36d71f8f9e7e5797ddf66 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/meta.json
+++ b/apps/next/src/content/docs/llamaindex/getting_started/meta.json
@@ -1,4 +1,4 @@
 {
   "title": "Getting Started",
-  "pages": ["index", "setup", "starter_tutorial", "environments", "concepts"]
+  "pages": ["index", "create_llama", "examples", "frameworks"]
 }
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/meta.json b/apps/next/src/content/docs/llamaindex/getting_started/setup/meta.json
deleted file mode 100644
index 2a5e97a34850e7c7971d04b0e57ac56b7c42524e..0000000000000000000000000000000000000000
--- a/apps/next/src/content/docs/llamaindex/getting_started/setup/meta.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "title": "Setup",
-  "description": "The setup guide",
-  "defaultOpen": true,
-  "pages": ["index", "next", "node", "typescript", "vite", "cloudflare"]
-}
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/meta.json b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/meta.json
deleted file mode 100644
index 1ea6d9295c412f89a0479feb7cf26eab9287838a..0000000000000000000000000000000000000000
--- a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/meta.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-  "title": "Starter Tutorials",
-  "pages": [
-    "retrieval_augmented_generation",
-    "chatbot",
-    "structured_data_extraction",
-    "agent"
-  ]
-}
diff --git a/apps/next/src/content/docs/llamaindex/guide/cost-analysis.mdx b/apps/next/src/content/docs/llamaindex/guide/cost-analysis.mdx
deleted file mode 100644
index bab0a75236556f4c0f9baef0b389e7a5ca6d274a..0000000000000000000000000000000000000000
--- a/apps/next/src/content/docs/llamaindex/guide/cost-analysis.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: Cost Analysis
----
-
-This page shows how to track LLM cost using APIs.
-
-## Callback Manager
-
-The callback manager is a class that manages the callback functions.
-
-You can register `llm-start`, `llm-end`, and `llm-stream` callbacks to the callback manager for tracking the cost.
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/recipes/cost-analysis";
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/guide/meta.json b/apps/next/src/content/docs/llamaindex/guide/meta.json
deleted file mode 100644
index b5d74a767e3a2b9c77cbb6bc2a1df00a877be53c..0000000000000000000000000000000000000000
--- a/apps/next/src/content/docs/llamaindex/guide/meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  "title": "Guide",
-  "description": "See our guide",
-  "pages": ["loading", "workflow", "chat", "agents", "cost-analysis"]
-}
diff --git a/apps/next/src/content/docs/llamaindex/index.mdx b/apps/next/src/content/docs/llamaindex/index.mdx
index 2ef5f4e88bb8854cd797a6272876fd8443bca08c..fe5dc0e412ff99c8961506c1d32da4f9c4327a2a 100644
--- a/apps/next/src/content/docs/llamaindex/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/index.mdx
@@ -16,9 +16,13 @@ The TypeScript implementation is designed for JavaScript server side application
 
 LlamaIndex.TS provides tools for beginners, advanced users, and everyone in between.
 
+Try it out with a starter example using StackBlitz:
+
 <iframe
   className="w-full h-[440px]"
   aria-label="LlamaIndex.TS Starter"
   aria-description="This is a starter example for LlamaIndex.TS, it shows the basic usage of the library."
   src="https://stackblitz.com/github/run-llama/LlamaIndexTS/tree/main/examples?embed=1&file=starter.ts"
 />
+
+You'll need an OpenAI API key to run this example. You can retrieve it from [OpenAI](https://platform.openai.com/api-keys).
diff --git a/apps/next/src/content/docs/llamaindex/meta.json b/apps/next/src/content/docs/llamaindex/meta.json
index 60e633950e9368836b916acfcb7718e5c182e26b..6439cab03c107baa11482c9da5365ff8e120d7fc 100644
--- a/apps/next/src/content/docs/llamaindex/meta.json
+++ b/apps/next/src/content/docs/llamaindex/meta.json
@@ -4,13 +4,11 @@
   "root": true,
   "pages": [
     "---Guide---",
-    "what-is-llamaindex",
     "index",
     "getting_started",
-    "migration",
-    "guide",
-    "examples",
+    "tutorials",
     "modules",
-    "integration"
+    "integration",
+    "migration"
   ]
 }
diff --git a/apps/next/src/content/docs/llamaindex/modules/agent/index.mdx b/apps/next/src/content/docs/llamaindex/migration/deprecated/agent/index.mdx
similarity index 91%
rename from apps/next/src/content/docs/llamaindex/modules/agent/index.mdx
rename to apps/next/src/content/docs/llamaindex/migration/deprecated/agent/index.mdx
index de651097646e4bb7ad31e35fc9920b97f9e20aa1..f3c7d6122497fb0872d69b4dfd272c78ff6161a6 100644
--- a/apps/next/src/content/docs/llamaindex/modules/agent/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/migration/deprecated/agent/index.mdx
@@ -2,6 +2,8 @@
 title: Agents
 ---
 
+**Note**: Agents are deprecated, use [Agent Workflows](/docs/llamaindex/modules/agent_workflow) instead.
+
 An “agent” is an automated reasoning and decision engine. It takes in a user input/query and can make internal decisions for executing that query in order to return the correct result. The key agent components can include, but are not limited to:
 
 - Breaking down a complex question into smaller ones
diff --git a/apps/next/src/content/docs/llamaindex/migration/meta.json b/apps/next/src/content/docs/llamaindex/migration/meta.json
index 7be5abb90f4a1309253b39861708a0535868362e..19bded9a46287c2c10346048fe90a4c5298e790b 100644
--- a/apps/next/src/content/docs/llamaindex/migration/meta.json
+++ b/apps/next/src/content/docs/llamaindex/migration/meta.json
@@ -1,5 +1,5 @@
 {
   "title": "Migration",
   "description": "Migration between different versions",
-  "pages": ["0.8-to-0.9"]
+  "pages": ["0.8-to-0.9", "deprecated"]
 }
diff --git a/apps/next/src/content/docs/llamaindex/modules/agent_workflow.mdx b/apps/next/src/content/docs/llamaindex/modules/agent_workflow.mdx
index be27b4ebd0977734fc95e21df837f828b6c0c0ac..27b5d6495997f9107936db186283bbb31394feb2 100644
--- a/apps/next/src/content/docs/llamaindex/modules/agent_workflow.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/agent_workflow.mdx
@@ -1,9 +1,9 @@
 ---
-title: Agent Workflow
+title: Agent Workflows
 ---
 
 
-Agent Workflows are a powerful system that enables you to create and orchestrate one or multiple agents with tools to perform specific tasks. It's built on top of the base `Workflow` system and provides a streamlined interface for agent interactions.
+Agent Workflows are a powerful system that enables you to create and orchestrate one or multiple agents with tools to perform specific tasks. It's built on top of the base [`Workflow`](./workflows) system and provides a streamlined interface for agent interactions.
 
 ## Usage
 
diff --git a/apps/next/src/content/docs/llamaindex/guide/chat/chat.mdx b/apps/next/src/content/docs/llamaindex/modules/chat/chat.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/chat/chat.mdx
rename to apps/next/src/content/docs/llamaindex/modules/chat/chat.mdx
diff --git a/apps/next/src/content/docs/llamaindex/guide/chat/install.mdx b/apps/next/src/content/docs/llamaindex/modules/chat/install.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/chat/install.mdx
rename to apps/next/src/content/docs/llamaindex/modules/chat/install.mdx
diff --git a/apps/next/src/content/docs/llamaindex/guide/chat/meta.json b/apps/next/src/content/docs/llamaindex/modules/chat/meta.json
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/chat/meta.json
rename to apps/next/src/content/docs/llamaindex/modules/chat/meta.json
diff --git a/apps/next/src/content/docs/llamaindex/guide/chat/rsc.mdx b/apps/next/src/content/docs/llamaindex/modules/chat/rsc.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/chat/rsc.mdx
rename to apps/next/src/content/docs/llamaindex/modules/chat/rsc.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/deepinfra.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/deepinfra.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/deepinfra.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/deepinfra.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/gemini.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/gemini.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/gemini.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/gemini.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/huggingface.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/huggingface.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/huggingface.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/huggingface.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx
index 2ef16f93582eeebfa288e497bf26f08b4d7158da..d03054228bab2163fc0ddebb1c236599228dbd55 100644
--- a/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx
@@ -4,7 +4,7 @@ title: Embedding
 
 The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
 
-This can be explicitly updated through `Settings`
+This can be explicitly updated through `Settings.embedModel`.
 
 ## Installation
 
@@ -35,7 +35,7 @@ Settings.embedModel = new OpenAIEmbedding({
 
 ## Local Embedding
 
-For local embeddings, you can use the [HuggingFace](/docs/llamaindex/modules/embeddings/available_embeddings/huggingface) embedding model.
+For local embeddings, you can use the [HuggingFace](/docs/llamaindex/modules/embeddings/huggingface) embedding model.
 
 ## Local Ollama Embeddings With Remote Host
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/jinaai.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/jinaai.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/jinaai.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/jinaai.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mistral.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/mistral.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mistral.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/mistral.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mixedbreadai.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/mixedbreadai.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mixedbreadai.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/mixedbreadai.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/ollama.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/ollama.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/ollama.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/ollama.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/openai.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/openai.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/openai.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/openai.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/together.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/together.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/together.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/together.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/voyageai.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/voyageai.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/voyageai.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/voyageai.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/anthropic.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/anthropic.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/anthropic.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/anthropic.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/azure.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/azure.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/azure.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/azure.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/bedrock.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/bedrock.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/bedrock.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/bedrock.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepinfra.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/deepinfra.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepinfra.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/deepinfra.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepseek.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/deepseek.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepseek.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/deepseek.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/fireworks.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/fireworks.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/fireworks.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/fireworks.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/gemini.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/gemini.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/gemini.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/gemini.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/groq.mdx
similarity index 95%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/groq.mdx
index a570a1fef711bb27710cdc35b25b3d2b3f9009b7..f8f17619dcd54feec7633fcddfca4b6a73100949 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/groq.mdx
@@ -3,7 +3,7 @@ title: Groq
 ---
 
 import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../../../examples/groq.ts";
+import CodeSource from "!raw-loader!../../../../../../../../examples/groq.ts";
 
 ## Installation
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx
index 1ee9b636dca4952027c782692b3cf246a42a62d7..d33a577f1b50d3e2053318a445f0260ee0e86cad 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx
@@ -45,7 +45,7 @@ export AZURE_OPENAI_DEPLOYMENT="gpt-4" # or some other deployment name
 
 ## Local LLM
 
-For local LLMs, currently we recommend the use of [Ollama](/docs/llamaindex/modules/llms/available_llms/ollama) LLM.
+For local LLMs, currently we recommend the use of [Ollama](/docs/llamaindex/modules/llms/ollama) LLM.
 
 ## Available LLMs
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/llama2.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/llama2.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/llama2.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/llama2.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/mistral.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/mistral.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/mistral.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/mistral.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/ollama.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/ollama.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/ollama.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/ollama.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/openai.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/openai.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/openai.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/openai.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/perplexity.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/perplexity.mdx
similarity index 87%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/perplexity.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/perplexity.mdx
index f9648ac00ba21d9ad63543e7829676f69a20574d..c9ae988f04c4aed6a02bfe2eb719e742216c8872 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/perplexity.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/perplexity.mdx
@@ -1,8 +1,26 @@
 ---
 title: Perplexity LLM
 ---
-## Usage
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install @llamaindex/perplexity
+	```
+
+	```shell tab="yarn"
+	yarn add @llamaindex/perplexity
+	```
+
+	```shell tab="pnpm"
+	pnpm add @llamaindex/perplexity
+	```
+</Tabs>
+
+## Usage
 
 ```ts
 import { Settings } from "llamaindex";
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/portkey.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/portkey.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/portkey.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/portkey.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/together.mdx
similarity index 94%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/together.mdx
index a9877cfa3a327394020bdd30bb50ba24bcdb2a96..af3307f303024255796138cebcee229d77164bf2 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/together.mdx
@@ -8,15 +8,15 @@ import { Tab, Tabs } from "fumadocs-ui/components/tabs";
 
 <Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
 	```shell tab="npm"
-	npm install llamaindex
+	npm install @llamaindex/together
 	```
 
 	```shell tab="yarn"
-	yarn add llamaindex
+	yarn add @llamaindex/together
 	```
 
 	```shell tab="pnpm"
-	pnpm add llamaindex
+	pnpm add @llamaindex/together
 	```
 </Tabs>
 
diff --git a/apps/next/src/content/docs/llamaindex/guide/loading/index.mdx b/apps/next/src/content/docs/llamaindex/modules/loading/index.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/loading/index.mdx
rename to apps/next/src/content/docs/llamaindex/modules/loading/index.mdx
diff --git a/apps/next/src/content/docs/llamaindex/guide/loading/meta.json b/apps/next/src/content/docs/llamaindex/modules/loading/meta.json
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/loading/meta.json
rename to apps/next/src/content/docs/llamaindex/modules/loading/meta.json
diff --git a/apps/next/src/content/docs/llamaindex/guide/loading/node-parser.mdx b/apps/next/src/content/docs/llamaindex/modules/loading/node-parser.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/loading/node-parser.mdx
rename to apps/next/src/content/docs/llamaindex/modules/loading/node-parser.mdx
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/1_setup.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/1_setup.mdx
similarity index 98%
rename from apps/next/src/content/docs/llamaindex/guide/agents/1_setup.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/1_setup.mdx
index 5a35bd32255f9a074aea36d6b87867cca3d0ffcd..e8bab98fdbd8e2981fa3899501d373913f981d7f 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/1_setup.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/1_setup.mdx
@@ -1,5 +1,5 @@
 ---
-title: Agent tutorial
+title: 1. Setup
 ---
 
 In this guide we'll walk you through the process of building an Agent in JavaScript using the LlamaIndex.TS library, starting from nothing and adding complexity in stages.
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/2_create_agent.mdx
similarity index 99%
rename from apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/2_create_agent.mdx
index a74832e2f7c772fa56edba20464faca745b8d017..0914f8a486e7d745f6e1565c5b197b6a1a7df12c 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/2_create_agent.mdx
@@ -1,5 +1,5 @@
 ---
-title: Create a basic agent
+title: 2. Create a basic agent
 ---
 
 We want to use `await` so we're going to wrap all of our code in a `main` function, like this:
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/3_local_model.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/3_local_model.mdx
similarity index 97%
rename from apps/next/src/content/docs/llamaindex/guide/agents/3_local_model.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/3_local_model.mdx
index 25b8c01ce941e83ae48e7823f6d5d5a3fb917d55..1224356a3aac1345c99eb1327f2ace86f6d21f75 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/3_local_model.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/3_local_model.mdx
@@ -1,5 +1,5 @@
 ---
-title: Using a local model via Ollama
+title: 3. Using a local model via Ollama
 ---
 
 If you're happy using OpenAI, you can skip this section, but many people are interested in using models they run themselves. The easiest way to do this is via the great work of our friends at [Ollama](https://ollama.com/), who provide a simple to use client that will download, install and run a [growing range of models](https://ollama.com/library) for you.
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/4_agentic_rag.mdx
similarity index 71%
rename from apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/4_agentic_rag.mdx
index 76e9fa0fea2884a8fb73a2c1783b4cd584a56a12..d5473554e4a9ef70fd8ed69111e07cbf1749c9b5 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/4_agentic_rag.mdx
@@ -1,5 +1,5 @@
 ---
-title: Adding Retrieval-Augmented Generation (RAG)
+title: 4. Adding Retrieval-Augmented Generation (RAG)
 ---
 
 While an agent that can perform math is nifty (LLMs are usually not very good at math), LLM-based applications are always more interesting when they work with large amounts of data. In this case, we're going to use a 200-page PDF of the proposed budget of the city of San Francisco for fiscal years 2024-2024 and 2024-2025. It's a great example because it's extremely wordy and full of tables of figures, which present a challenge for humans and LLMs alike.
@@ -87,32 +87,9 @@ By default LlamaIndex will retrieve just the 2 most relevant chunks of text. Thi
 retriever.similarityTopK = 10;
 ```
 
-### Approach 1: Create a Context-Aware Agent
+### Use index.queryTool
 
-With the retriever ready, you can create a **context-aware agent**.
-
-```javascript
-const agent = new OpenAIContextAwareAgent({
-  contextRetriever: retriever,
-});
-
-// Example query to the context-aware agent
-let response = await agent.chat({
-  message: `What's the budget of San Francisco in 2023-2024?`,
-});
-
-console.log(response);
-```
-
-**Expected Output:**
-
-```md
-The total budget for the City and County of San Francisco for the fiscal year 2023-2024 is $14.6 billion. This represents a $611.8 million, or 4.4 percent, increase over the previous fiscal year's budget. The budget covers various expenditures across different departments and services, including significant allocations to public works, transportation, commerce, public protection, and health services.
-```
-
-### Approach 2: Using QueryEngineTool (Alternative Approach)
-
-If you prefer more flexibility and don't mind additional complexity, you can create a `QueryEngineTool`. This approach allows you to define the query logic, providing a more tailored way to interact with the data, but note that it introduces a delay due to the extra tool call.
+`index.queryTool` creates a `QueryEngineTool` that can be used by an agent to query data from the index.
 
 ```javascript
 const tools = [
@@ -125,9 +102,9 @@ const tools = [
 ];
 
 // Create an agent using the tools array
-const myAgent = agent({ tools });
+const ragAgent = agent({ tools });
 
-let toolResponse = await myAgent.run("What's the budget of San Francisco in 2023-2024?");
+let toolResponse = await ragAgent.run("What's the budget of San Francisco in 2023-2024?");
 
 console.log(toolResponse);
 ```
@@ -155,10 +132,4 @@ console.log(toolResponse);
 
 Once again we see a `toolResult`. You can see the query the LLM decided to send to the query engine ("total budget"), and the output the engine returned. In `response.message` you see that the LLM has returned the output from the tool almost verbatim, although it trimmed out the bit about 2024-2025 since we didn't ask about that year.
 
-### Comparison of Approaches
-
-The `OpenAIContextAwareAgent` approach simplifies the setup by allowing you to directly link the retriever to the agent, making it straightforward to access relevant context for your queries. This is ideal for situations where you want easy integration with existing data sources, like a context chat engine.
-
-On the other hand, using the `QueryEngineTool` offers more flexibility and power. This method allows for customization in how queries are constructed and executed, enabling you to query data from various storages and process them in different ways. However, this added flexibility comes with increased complexity and response time due to the separate tool call and queryEngine generating tool output by LLM that is then passed to the agent.
-
 So now we have an agent that can index complicated documents and answer questions about them. Let's [combine our math agent and our RAG agent](5_rag_and_tools)!
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/5_rag_and_tools.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/5_rag_and_tools.mdx
similarity index 99%
rename from apps/next/src/content/docs/llamaindex/guide/agents/5_rag_and_tools.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/5_rag_and_tools.mdx
index 0c23b4ae3195d8dbb6946d2d7fba6e2e92c042f3..971bd33a9a78e8863af6c50a0c220b7dce734e02 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/5_rag_and_tools.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/5_rag_and_tools.mdx
@@ -1,5 +1,5 @@
 ---
-title: A RAG agent that does math
+title: 5. A RAG agent that does math
 ---
 
 In [our third iteration of the agent](https://github.com/run-llama/ts-agents/blob/main/3_rag_and_tools/agent.ts) we've combined the two previous agents, so we've defined both `sumNumbers` and a `QueryEngineTool` and created an array of two tools. The tools support both Zod and JSON Schema for parameter definition:
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/6_llamaparse.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/6_llamaparse.mdx
similarity index 97%
rename from apps/next/src/content/docs/llamaindex/guide/agents/6_llamaparse.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/6_llamaparse.mdx
index 1eb845b954802d93ab4363bf07fd7bb1fa32d8dd..fcdf0aa81227dbc18e743618965ab0a32152169d 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/6_llamaparse.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/6_llamaparse.mdx
@@ -1,5 +1,5 @@
 ---
-title: Adding LlamaParse
+title: 6. Adding LlamaParse
 ---
 
 Complicated PDFs can be very tricky for LLMs to understand. To help with this, LlamaIndex provides LlamaParse, a hosted service that parses complex documents including PDFs. To use it, get a `LLAMA_CLOUD_API_KEY` by [signing up for LlamaCloud](https://cloud.llamaindex.ai/) (it's free for up to 1000 pages/day) and adding it to your `.env` file just as you did for your OpenAI key:
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/7_qdrant.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/7_qdrant.mdx
similarity index 98%
rename from apps/next/src/content/docs/llamaindex/guide/agents/7_qdrant.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/7_qdrant.mdx
index eb3c4500530d2e6ffd8b04adc900c8b63212bbaf..1c0f723b341b2227cc8ded5dbaae457e06c7b800 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/7_qdrant.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/7_qdrant.mdx
@@ -1,5 +1,5 @@
 ---
-title: Adding persistent vector storage
+title: 7. Adding persistent vector storage
 ---
 
 In the previous examples, we've been loading our data into memory each time we run the agent. This is fine for small datasets, but for larger datasets you'll want to store your embeddings in a database. LlamaIndex.TS provides a `VectorStore` class that can store your embeddings in a variety of databases. We're going to use [Qdrant](https://qdrant.tech/), a popular vector store, for this example.
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/images/agent_flow.png b/apps/next/src/content/docs/llamaindex/tutorials/agents/images/agent_flow.png
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/agents/images/agent_flow.png
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/images/agent_flow.png
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/meta.json b/apps/next/src/content/docs/llamaindex/tutorials/agents/meta.json
similarity index 84%
rename from apps/next/src/content/docs/llamaindex/guide/agents/meta.json
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/meta.json
index 08129224144dccbc5b400650689a978caf20b2ef..5579c55f208805a9928ac885f70e759459b43bf9 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/meta.json
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/meta.json
@@ -1,5 +1,5 @@
 {
-  "title": "Agents",
+  "title": "Agent with RAG",
   "pages": [
     "1_setup",
     "2_create_agent",
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/agent.mdx b/apps/next/src/content/docs/llamaindex/tutorials/basic_agent.mdx
similarity index 75%
rename from apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/agent.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/basic_agent.mdx
index ffa84aa94c20bcab6abd09f114026d27103f4951..85377bee5a928cd2cea04a7976d09c8d491a7af0 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/agent.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/basic_agent.mdx
@@ -1,11 +1,11 @@
 ---
-title: Agent tutorial
+title: Basic Agent
 ---
 
 import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../../examples/agent/openai";
+import CodeSource from "!raw-loader!../../../../../../../examples/agent/openai";
 
-We have a comprehensive, step-by-step [guide to building agents in LlamaIndex.TS](../../guides/agents/setup) that we recommend to learn what agents are and how to build them for production. But building a basic agent is simple:
+We have a comprehensive, step-by-step [guide to building agents in LlamaIndex.TS](./agents/1_setup) that we recommend to learn what agents are and how to build them for production. But building a basic agent is simple:
 
 ## Set up
 
diff --git a/apps/next/src/content/docs/llamaindex/examples/local_llm.mdx b/apps/next/src/content/docs/llamaindex/tutorials/local_llm.mdx
similarity index 92%
rename from apps/next/src/content/docs/llamaindex/examples/local_llm.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/local_llm.mdx
index 6faae94cb2a2e3936282d1b3c7c407e97513898d..abcf2f07bb1d64c7fcb8a5b1ac3437b0bd3d8ee5 100644
--- a/apps/next/src/content/docs/llamaindex/examples/local_llm.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/local_llm.mdx
@@ -4,7 +4,7 @@ title: Local LLMs
 
 import { Tab, Tabs } from "fumadocs-ui/components/tabs";
 
-LlamaIndex.TS supports OpenAI and [other remote LLM APIs](other_llms). You can also run a local LLM on your machine!
+LlamaIndex.TS supports OpenAI and [other remote LLM APIs](/docs/llamaindex/modules/llms). You can also run a local LLM on your machine!
 
 ## Using a local model via Ollama
 
@@ -45,7 +45,10 @@ To switch the LLM in your code, you first need to make sure to install the packa
 Then, to tell LlamaIndex to use a local LLM, use the `Settings` object:
 
 ```javascript
-Settings.llm = new Ollama({
+import { Settings } from "llamaindex";
+import { ollama } from "@llamaindex/ollama";
+
+Settings.llm = ollama({
   model: "mixtral:8x7b",
 });
 ```
diff --git a/apps/next/src/content/docs/llamaindex/tutorials/meta.json b/apps/next/src/content/docs/llamaindex/tutorials/meta.json
new file mode 100644
index 0000000000000000000000000000000000000000..6145b9e27f2e0927c1f19b0175d530004091747d
--- /dev/null
+++ b/apps/next/src/content/docs/llamaindex/tutorials/meta.json
@@ -0,0 +1,12 @@
+{
+  "title": "Tutorials",
+  "pages": [
+    "rag",
+    "basic_agent",
+    "agents",
+    "workflow",
+    "local_llm",
+    "chatbot",
+    "structured_data_extraction"
+  ]
+}
diff --git a/apps/next/src/content/docs/llamaindex/_static/concepts/indexing.jpg b/apps/next/src/content/docs/llamaindex/tutorials/rag/_static/concepts/indexing.jpg
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/_static/concepts/indexing.jpg
rename to apps/next/src/content/docs/llamaindex/tutorials/rag/_static/concepts/indexing.jpg
diff --git a/apps/next/src/content/docs/llamaindex/_static/concepts/querying.jpg b/apps/next/src/content/docs/llamaindex/tutorials/rag/_static/concepts/querying.jpg
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/_static/concepts/querying.jpg
rename to apps/next/src/content/docs/llamaindex/tutorials/rag/_static/concepts/querying.jpg
diff --git a/apps/next/src/content/docs/llamaindex/_static/concepts/rag.jpg b/apps/next/src/content/docs/llamaindex/tutorials/rag/_static/concepts/rag.jpg
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/_static/concepts/rag.jpg
rename to apps/next/src/content/docs/llamaindex/tutorials/rag/_static/concepts/rag.jpg
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/concepts.mdx b/apps/next/src/content/docs/llamaindex/tutorials/rag/concepts.mdx
similarity index 97%
rename from apps/next/src/content/docs/llamaindex/getting_started/concepts.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/rag/concepts.mdx
index 4189c3204bd780eadbe5e536b11bfdead31dc1a6..99cc8423e3b26b6475d133aecb907454f7c56768 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/concepts.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/rag/concepts.mdx
@@ -16,7 +16,7 @@ LlamaIndex uses a two stage method when using an LLM with your data:
 1. **indexing stage**: preparing a knowledge base, and
 2. **querying stage**: retrieving relevant context from the knowledge to assist the LLM in responding to a question
 
-![](../_static/concepts/rag.jpg)
+![](./_static/concepts/rag.jpg)
 
 This process is also known as Retrieval Augmented Generation (RAG).
 
@@ -28,7 +28,7 @@ Let's explore each stage in detail.
 
 LlamaIndex.TS help you prepare the knowledge base with a suite of data connectors and indexes.
 
-![](../_static/concepts/indexing.jpg)
+![](./_static/concepts/indexing.jpg)
 
 [**Data Loaders**](/docs/llamaindex/modules/data_loaders/index):
 A data connector (i.e. `Reader`) ingest data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
@@ -54,7 +54,7 @@ LlamaIndex provides composable modules that help you build and integrate RAG pip
 
 These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
 
-![](../_static/concepts/querying.jpg)
+![](./_static/concepts/querying.jpg)
 
 #### Building Blocks
 
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/retrieval_augmented_generation.mdx b/apps/next/src/content/docs/llamaindex/tutorials/rag/index.mdx
similarity index 89%
rename from apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/retrieval_augmented_generation.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/rag/index.mdx
index eef95d0e80663771d525038af6da91a6ba924063..c253efaaf1652baef040e6d236c473f7608ab3b6 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/retrieval_augmented_generation.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/rag/index.mdx
@@ -1,12 +1,12 @@
 ---
-title: Retrieval Augmented Generation (RAG) Tutorial
+title: Retrieval Augmented Generation (RAG)
 ---
 
 import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
 import CodeSource from "!raw-loader!../../../../../../../../examples/vectorIndex";
 import TSConfigSource from "!!raw-loader!../../../../../../../../examples/tsconfig.json";
 
-One of the most common use-cases for LlamaIndex is Retrieval-Augmented Generation or RAG, in which your data is indexed and selectively retrieved to be given to an LLM as source material for responding to a query. You can learn more about the [concepts behind RAG](../concepts).
+One of the most common use-cases for LlamaIndex is Retrieval-Augmented Generation or RAG, in which your data is indexed and selectively retrieved to be given to an LLM as source material for responding to a query. You can learn more about the [concepts behind RAG](./rag/concepts).
 
 ## Set up the project
 
@@ -19,7 +19,7 @@ npm install -D typescript @types/node
 
 Then, check out the [installation](../setup) steps to install LlamaIndex.TS and prepare an OpenAI key.
 
-You can use [other LLMs](../../examples/other_llms) via their APIs; if you would prefer to use local models check out our [local LLM example](../../examples/local_llm).
+You can use [other LLMs](/docs/llamaindex/modules/llms) via their APIs; if you would prefer to use local models check out our [local LLM example](./local_llm).
 
 ## Run queries
 
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/structured_data_extraction.mdx b/apps/next/src/content/docs/llamaindex/tutorials/structured_data_extraction.mdx
similarity index 80%
rename from apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/structured_data_extraction.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/structured_data_extraction.mdx
index e88d93efe5b05c5d8323df847cc626371f7a28d3..696ca66e389f296cf70b90416791f2611b6da6a7 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/structured_data_extraction.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/structured_data_extraction.mdx
@@ -1,13 +1,13 @@
 ---
-title: Structured data extraction tutorial
+title: Structured data extraction
 ---
 
 import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../../examples/jsonExtract";
+import CodeSource from "!raw-loader!../../../../../../../examples/jsonExtract";
 
 Make sure you have installed LlamaIndex.TS and have an OpenAI key. If you haven't, check out the [installation](../setup) guide.
 
-You can use [other LLMs](../../examples/other_llms) via their APIs; if you would prefer to use local models check out our [local LLM example](../../examples/local_llm).
+You can use [other LLMs](/docs/llamaindex/modules/llms) via their APIs; if you would prefer to use local models check out our [local LLM example](./local_llm).
 
 ## Set up
 
diff --git a/apps/next/src/content/docs/llamaindex/guide/workflow/different-inputs-outputs.mdx b/apps/next/src/content/docs/llamaindex/tutorials/workflow/different-inputs-outputs.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/workflow/different-inputs-outputs.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/workflow/different-inputs-outputs.mdx
diff --git a/apps/next/src/content/docs/llamaindex/guide/workflow/index.mdx b/apps/next/src/content/docs/llamaindex/tutorials/workflow/index.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/workflow/index.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/workflow/index.mdx
diff --git a/apps/next/src/content/docs/llamaindex/guide/workflow/meta.json b/apps/next/src/content/docs/llamaindex/tutorials/workflow/meta.json
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/workflow/meta.json
rename to apps/next/src/content/docs/llamaindex/tutorials/workflow/meta.json
diff --git a/apps/next/src/content/docs/llamaindex/guide/workflow/streaming.mdx b/apps/next/src/content/docs/llamaindex/tutorials/workflow/streaming.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/workflow/streaming.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/workflow/streaming.mdx
diff --git a/examples/agentworkflow/single-agent.ts b/examples/agentworkflow/single-agent.ts
index ea94e44a11137e337cd1adcd55666f1712f7b2eb..1e55cc355e2759df1f7ad09ce77ee1bff2498711 100644
--- a/examples/agentworkflow/single-agent.ts
+++ b/examples/agentworkflow/single-agent.ts
@@ -1,36 +1,31 @@
 /**
- * This example shows how to use AgentWorkflow as a single agent with tools
+ * This example shows how to use a single agent with a tool
  */
 import { openai } from "@llamaindex/openai";
-import { Settings, agent } from "llamaindex";
+import { agent } from "llamaindex";
 import { getWeatherTool } from "../agent/utils/tools";
 
-Settings.llm = openai({
-  model: "gpt-4o",
-});
-
-async function singleWeatherAgent() {
-  const workflow = agent({
+async function main() {
+  const weatherAgent = agent({
+    llm: openai({
+      model: "gpt-4o",
+    }),
     tools: [getWeatherTool],
     verbose: false,
   });
 
-  const workflowContext = workflow.run(
-    "What's the weather like in San Francisco?",
-  );
-  const sfResult = await workflowContext;
-  // The weather in San Francisco, CA is currently sunny.
-  console.log(`${JSON.stringify(sfResult, null, 2)}`);
+  // Run the agent and keep the context
+  const context = weatherAgent.run("What's the weather like in San Francisco?");
+  const result = await context;
+  console.log(`${JSON.stringify(result, null, 2)}`);
 
   // Reuse the context from the previous run
-  const workflowContext2 = workflow.run("Compare it with California?", {
-    context: workflowContext.data,
+  const caResult = await weatherAgent.run("Compare it with California?", {
+    context: context.data,
   });
-  const caResult = await workflowContext2;
-  // Both San Francisco and California are currently experiencing sunny weather.
   console.log(`${JSON.stringify(caResult, null, 2)}`);
 }
 
-singleWeatherAgent().catch((error) => {
+main().catch((error) => {
   console.error("Error:", error);
 });
diff --git a/packages/community/README.md b/packages/community/README.md
index 0f108a70d4774eca683553334608cc6251db99ce..2051da7edf5ecfed1c2bc00e83f85baf1f74a154 100644
--- a/packages/community/README.md
+++ b/packages/community/README.md
@@ -5,8 +5,8 @@
 ## Current Features:
 
 - Bedrock support for Amazon Nova models Pro, Lite and Micro
-- Bedrock support for the Anthropic Claude Models [usage](https://ts.llamaindex.ai/modules/llms/available_llms/bedrock) including the latest Sonnet 3.5 v2 and Haiku 3.5
-- Bedrock support for the Meta LLama 2, 3, 3.1 and 3.2 Models [usage](https://ts.llamaindex.ai/modules/llms/available_llms/bedrock)
+- Bedrock support for the Anthropic Claude Models [usage](https://ts.llamaindex.ai/docs/llamaindex/modules/llms/bedrock) including the latest Sonnet 3.5 v2 and Haiku 3.5
+- Bedrock support for the Meta LLama 2, 3, 3.1 and 3.2 Models [usage](https://ts.llamaindex.ai/docs/llamaindex/modules/llms/bedrock)
 - Meta LLama3.1 405b and Llama3.2 tool call support
 - Meta 3.2 11B and 90B vision support
 - Bedrock support for querying Knowledge Base