From fa40b3651672b57b5894705b3130fca38a52fd69 Mon Sep 17 00:00:00 2001
From: Marcus Schiesser <mail@marcusschiesser.de>
Date: Wed, 12 Mar 2025 17:54:20 +0700
Subject: [PATCH] docs: cleanup (#1745)

---
 .../docs/llamaindex/examples/agent.mdx        |  12 ----
 .../docs/llamaindex/examples/agent_gemini.mdx |  28 --------
 .../docs/llamaindex/examples/chat_engine.mdx  |  10 ---
 .../examples/context_aware_agent.mdx          |  59 ----------------
 .../docs/llamaindex/examples/meta.json        |  15 ----
 .../docs/llamaindex/examples/other_llms.mdx   |  66 ------------------
 .../llamaindex/examples/save_load_index.mdx   |   8 ---
 .../llamaindex/examples/summary_index.mdx     |   8 ---
 .../docs/llamaindex/examples/vector_index.mdx |   8 ---
 .../chatbot.mdx => create_llama.mdx}          |   6 +-
 .../examples.mdx}                             |  15 +++-
 .../{setup => frameworks}/cloudflare.mdx      |   0
 .../{setup => frameworks}/index.mdx           |   2 +-
 .../getting_started/frameworks/meta.json      |   6 ++
 .../{setup => frameworks}/next.mdx            |   0
 .../{setup => frameworks}/node.mdx            |   2 +-
 .../{setup => frameworks}/typescript.mdx      |   0
 .../{setup => frameworks}/vite.mdx            |   0
 .../images/create_llama.png                   | Bin
 .../docs/llamaindex/getting_started/index.mdx |  10 +--
 .../docs/llamaindex/getting_started/meta.json |   2 +-
 .../getting_started/setup/meta.json           |   6 --
 .../starter_tutorial/meta.json                |   9 ---
 .../docs/llamaindex/guide/cost-analysis.mdx   |  16 -----
 .../content/docs/llamaindex/guide/meta.json   |   5 --
 .../src/content/docs/llamaindex/index.mdx     |   4 ++
 .../src/content/docs/llamaindex/meta.json     |   8 +--
 .../deprecated}/agent/index.mdx               |   2 +
 .../docs/llamaindex/migration/meta.json       |   2 +-
 .../llamaindex/modules/agent_workflow.mdx     |   4 +-
 .../{guide => modules}/chat/chat.mdx          |   0
 .../{guide => modules}/chat/install.mdx       |   0
 .../{guide => modules}/chat/meta.json         |   0
 .../{guide => modules}/chat/rsc.mdx           |   0
 .../{available_embeddings => }/deepinfra.mdx  |   0
 .../{available_embeddings => }/gemini.mdx     |   0
 .../huggingface.mdx                           |   0
 .../llamaindex/modules/embeddings/index.mdx   |   4 +-
 .../{available_embeddings => }/jinaai.mdx     |   0
 .../{available_embeddings => }/mistral.mdx    |   0
 .../mixedbreadai.mdx                          |   0
 .../{available_embeddings => }/ollama.mdx     |   0
 .../{available_embeddings => }/openai.mdx     |   0
 .../{available_embeddings => }/together.mdx   |   0
 .../{available_embeddings => }/voyageai.mdx   |   0
 .../llms/{available_llms => }/anthropic.mdx   |   0
 .../llms/{available_llms => }/azure.mdx       |   0
 .../llms/{available_llms => }/bedrock.mdx     |   0
 .../llms/{available_llms => }/deepinfra.mdx   |   0
 .../llms/{available_llms => }/deepseek.mdx    |   0
 .../llms/{available_llms => }/fireworks.mdx   |   0
 .../llms/{available_llms => }/gemini.mdx      |   0
 .../llms/{available_llms => }/groq.mdx        |   2 +-
 .../docs/llamaindex/modules/llms/index.mdx    |   2 +-
 .../llms/{available_llms => }/llama2.mdx      |   0
 .../llms/{available_llms => }/mistral.mdx     |   0
 .../llms/{available_llms => }/ollama.mdx      |   0
 .../llms/{available_llms => }/openai.mdx      |   0
 .../llms/{available_llms => }/perplexity.mdx  |  20 +++++-
 .../llms/{available_llms => }/portkey.mdx     |   0
 .../llms/{available_llms => }/together.mdx    |   6 +-
 .../{guide => modules}/loading/index.mdx      |   0
 .../{guide => modules}/loading/meta.json      |   0
 .../loading/node-parser.mdx                   |   0
 .../{guide => tutorials}/agents/1_setup.mdx   |   2 +-
 .../agents/2_create_agent.mdx                 |   2 +-
 .../agents/3_local_model.mdx                  |   2 +-
 .../agents/4_agentic_rag.mdx                  |  39 ++---------
 .../agents/5_rag_and_tools.mdx                |   2 +-
 .../agents/6_llamaparse.mdx                   |   2 +-
 .../{guide => tutorials}/agents/7_qdrant.mdx  |   2 +-
 .../agents/images/agent_flow.png              | Bin
 .../{guide => tutorials}/agents/meta.json     |   2 +-
 .../agent.mdx => tutorials/basic_agent.mdx}   |   6 +-
 .../{examples => tutorials}/local_llm.mdx     |   7 +-
 .../docs/llamaindex/tutorials/meta.json       |  12 ++++
 .../rag}/_static/concepts/indexing.jpg        | Bin
 .../rag}/_static/concepts/querying.jpg        | Bin
 .../rag}/_static/concepts/rag.jpg             | Bin
 .../rag}/concepts.mdx                         |   6 +-
 .../rag/index.mdx}                            |   6 +-
 .../structured_data_extraction.mdx            |   6 +-
 .../workflow/different-inputs-outputs.mdx     |   0
 .../{guide => tutorials}/workflow/index.mdx   |   0
 .../{guide => tutorials}/workflow/meta.json   |   0
 .../workflow/streaming.mdx                    |   0
 examples/agentworkflow/single-agent.ts        |  33 ++++-----
 packages/community/README.md                  |   4 +-
 88 files changed, 123 insertions(+), 357 deletions(-)
 delete mode 100644 apps/next/src/content/docs/llamaindex/examples/agent.mdx
 delete mode 100644 apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx
 delete mode 100644 apps/next/src/content/docs/llamaindex/examples/chat_engine.mdx
 delete mode 100644 apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx
 delete mode 100644 apps/next/src/content/docs/llamaindex/examples/meta.json
 delete mode 100644 apps/next/src/content/docs/llamaindex/examples/other_llms.mdx
 delete mode 100644 apps/next/src/content/docs/llamaindex/examples/save_load_index.mdx
 delete mode 100644 apps/next/src/content/docs/llamaindex/examples/summary_index.mdx
 delete mode 100644 apps/next/src/content/docs/llamaindex/examples/vector_index.mdx
 rename apps/next/src/content/docs/llamaindex/getting_started/{starter_tutorial/chatbot.mdx => create_llama.mdx} (73%)
 rename apps/next/src/content/docs/llamaindex/{examples/more_examples.mdx => getting_started/examples.mdx} (63%)
 rename apps/next/src/content/docs/llamaindex/getting_started/{setup => frameworks}/cloudflare.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/getting_started/{setup => frameworks}/index.mdx (97%)
 create mode 100644 apps/next/src/content/docs/llamaindex/getting_started/frameworks/meta.json
 rename apps/next/src/content/docs/llamaindex/getting_started/{setup => frameworks}/next.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/getting_started/{setup => frameworks}/node.mdx (97%)
 rename apps/next/src/content/docs/llamaindex/getting_started/{setup => frameworks}/typescript.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/getting_started/{setup => frameworks}/vite.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/getting_started/{starter_tutorial => }/images/create_llama.png (100%)
 delete mode 100644 apps/next/src/content/docs/llamaindex/getting_started/setup/meta.json
 delete mode 100644 apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/meta.json
 delete mode 100644 apps/next/src/content/docs/llamaindex/guide/cost-analysis.mdx
 delete mode 100644 apps/next/src/content/docs/llamaindex/guide/meta.json
 rename apps/next/src/content/docs/llamaindex/{modules => migration/deprecated}/agent/index.mdx (91%)
 rename apps/next/src/content/docs/llamaindex/{guide => modules}/chat/chat.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/{guide => modules}/chat/install.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/{guide => modules}/chat/meta.json (100%)
 rename apps/next/src/content/docs/llamaindex/{guide => modules}/chat/rsc.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/embeddings/{available_embeddings => }/deepinfra.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/embeddings/{available_embeddings => }/gemini.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/embeddings/{available_embeddings => }/huggingface.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/embeddings/{available_embeddings => }/jinaai.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/embeddings/{available_embeddings => }/mistral.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/embeddings/{available_embeddings => }/mixedbreadai.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/embeddings/{available_embeddings => }/ollama.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/embeddings/{available_embeddings => }/openai.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/embeddings/{available_embeddings => }/together.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/embeddings/{available_embeddings => }/voyageai.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/anthropic.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/azure.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/bedrock.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/deepinfra.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/deepseek.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/fireworks.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/gemini.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/groq.mdx (95%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/llama2.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/mistral.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/ollama.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/openai.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/perplexity.mdx (87%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/portkey.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/modules/llms/{available_llms => }/together.mdx (94%)
 rename apps/next/src/content/docs/llamaindex/{guide => modules}/loading/index.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/{guide => modules}/loading/meta.json (100%)
 rename apps/next/src/content/docs/llamaindex/{guide => modules}/loading/node-parser.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/{guide => tutorials}/agents/1_setup.mdx (98%)
 rename apps/next/src/content/docs/llamaindex/{guide => tutorials}/agents/2_create_agent.mdx (99%)
 rename apps/next/src/content/docs/llamaindex/{guide => tutorials}/agents/3_local_model.mdx (97%)
 rename apps/next/src/content/docs/llamaindex/{guide => tutorials}/agents/4_agentic_rag.mdx (71%)
 rename apps/next/src/content/docs/llamaindex/{guide => tutorials}/agents/5_rag_and_tools.mdx (99%)
 rename apps/next/src/content/docs/llamaindex/{guide => tutorials}/agents/6_llamaparse.mdx (97%)
 rename apps/next/src/content/docs/llamaindex/{guide => tutorials}/agents/7_qdrant.mdx (98%)
 rename apps/next/src/content/docs/llamaindex/{guide => tutorials}/agents/images/agent_flow.png (100%)
 rename apps/next/src/content/docs/llamaindex/{guide => tutorials}/agents/meta.json (84%)
 rename apps/next/src/content/docs/llamaindex/{getting_started/starter_tutorial/agent.mdx => tutorials/basic_agent.mdx} (75%)
 rename apps/next/src/content/docs/llamaindex/{examples => tutorials}/local_llm.mdx (92%)
 create mode 100644 apps/next/src/content/docs/llamaindex/tutorials/meta.json
 rename apps/next/src/content/docs/llamaindex/{ => tutorials/rag}/_static/concepts/indexing.jpg (100%)
 rename apps/next/src/content/docs/llamaindex/{ => tutorials/rag}/_static/concepts/querying.jpg (100%)
 rename apps/next/src/content/docs/llamaindex/{ => tutorials/rag}/_static/concepts/rag.jpg (100%)
 rename apps/next/src/content/docs/llamaindex/{getting_started => tutorials/rag}/concepts.mdx (97%)
 rename apps/next/src/content/docs/llamaindex/{getting_started/starter_tutorial/retrieval_augmented_generation.mdx => tutorials/rag/index.mdx} (89%)
 rename apps/next/src/content/docs/llamaindex/{getting_started/starter_tutorial => tutorials}/structured_data_extraction.mdx (80%)
 rename apps/next/src/content/docs/llamaindex/{guide => tutorials}/workflow/different-inputs-outputs.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/{guide => tutorials}/workflow/index.mdx (100%)
 rename apps/next/src/content/docs/llamaindex/{guide => tutorials}/workflow/meta.json (100%)
 rename apps/next/src/content/docs/llamaindex/{guide => tutorials}/workflow/streaming.mdx (100%)

diff --git a/apps/next/src/content/docs/llamaindex/examples/agent.mdx b/apps/next/src/content/docs/llamaindex/examples/agent.mdx
deleted file mode 100644
index 84b4fb29f..000000000
--- a/apps/next/src/content/docs/llamaindex/examples/agent.mdx
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Agents
----
-
-A built-in agent that can take decisions and reasoning based on the tools provided to it.
-
-## OpenAI Agent
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/agent/openai";
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx b/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx
deleted file mode 100644
index e9caf43ac..000000000
--- a/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx
+++ /dev/null
@@ -1,28 +0,0 @@
----
-title: Gemini Agent
----
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSourceGemini from "!raw-loader!../../../../../../../examples/gemini/agent.ts";
-
-## Installation
-
-import { Tab, Tabs } from "fumadocs-ui/components/tabs";
-
-<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
-	```shell tab="npm"
-	npm install llamaindex @llamaindex/google
-	```
-
-	```shell tab="yarn"
-	yarn add llamaindex @llamaindex/google
-	```
-
-	```shell tab="pnpm"
-	pnpm add llamaindex @llamaindex/google
-	```
-</Tabs>
-
-## Source 
-
-<DynamicCodeBlock lang="ts" code={CodeSourceGemini} />
diff --git a/apps/next/src/content/docs/llamaindex/examples/chat_engine.mdx b/apps/next/src/content/docs/llamaindex/examples/chat_engine.mdx
deleted file mode 100644
index ac4528951..000000000
--- a/apps/next/src/content/docs/llamaindex/examples/chat_engine.mdx
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Chat Engine
----
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/chatEngine";
-
-Chat Engine is a class that allows you to create a chatbot from a retriever. It is a wrapper around a retriever that allows you to chat with it in a conversational manner.
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx b/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx
deleted file mode 100644
index d2bc34463..000000000
--- a/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx
+++ /dev/null
@@ -1,59 +0,0 @@
----
-title: Context-Aware Agent
----
-
-The Context-Aware Agent enhances the capabilities of standard LLM agents by incorporating relevant context from a retriever for each query. This allows the agent to provide more informed and specific responses based on the available information.
-
-## Usage
-
-Here's a simple example of how to use the Context-Aware Agent:
-
-```typescript
-import {
-  Document,
-  VectorStoreIndex,
-} from "llamaindex";
-import { OpenAI, OpenAIContextAwareAgent } from "@llamaindex/openai";
-
-async function createContextAwareAgent() {
-  // Create and index some documents
-  const documents = [
-    new Document({
-      text: "LlamaIndex is a data framework for LLM applications.",
-      id_: "doc1",
-    }),
-    new Document({
-      text: "The Eiffel Tower is located in Paris, France.",
-      id_: "doc2",
-    }),
-  ];
-
-  const index = await VectorStoreIndex.fromDocuments(documents);
-  const retriever = index.asRetriever({ similarityTopK: 1 });
-
-  // Create the Context-Aware Agent
-  const agent = new OpenAIContextAwareAgent({
-    llm: new OpenAI({ model: "gpt-3.5-turbo" }),
-    contextRetriever: retriever,
-  });
-
-  // Use the agent to answer queries
-  const response = await agent.chat({
-    message: "What is LlamaIndex used for?",
-  });
-
-  console.log("Agent Response:", response.response);
-}
-
-createContextAwareAgent().catch(console.error);
-```
-
-In this example, the Context-Aware Agent uses the retriever to fetch relevant context for each query, allowing it to provide more accurate and informed responses based on the indexed documents.
-
-## Key Components
-
-- `contextRetriever`: A retriever (e.g., from a VectorStoreIndex) that fetches relevant documents or passages for each query.
-
-## Available Context-Aware Agents
-
-- `OpenAIContextAwareAgent`: A context-aware agent using OpenAI's models.
diff --git a/apps/next/src/content/docs/llamaindex/examples/meta.json b/apps/next/src/content/docs/llamaindex/examples/meta.json
deleted file mode 100644
index f432a3b77..000000000
--- a/apps/next/src/content/docs/llamaindex/examples/meta.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "title": "Examples",
-  "pages": [
-    "more_examples",
-    "chat_engine",
-    "vector_index",
-    "summary_index",
-    "save_load_index",
-    "context_aware_agent",
-    "agent",
-    "agent_gemini",
-    "local_llm",
-    "other_llms"
-  ]
-}
diff --git a/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx b/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx
deleted file mode 100644
index f2f4e4ae5..000000000
--- a/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx
+++ /dev/null
@@ -1,66 +0,0 @@
----
-title: Using other LLM APIs
----
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/mistral";
-
-By default LlamaIndex.TS uses OpenAI's LLMs and embedding models, but we support [lots of other LLMs](../modules/llms) including models from Mistral (Mistral, Mixtral), Anthropic (Claude) and Google (Gemini).
-
-If you don't want to use an API at all you can [run a local model](./local_llm).
-
-This example runs you through the process of setting up a Mistral model:
-
-
-## Installation
-
-import { Tab, Tabs } from "fumadocs-ui/components/tabs";
-
-<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
-	```shell tab="npm"
-	npm install llamaindex @llamaindex/mistral
-	```
-
-	```shell tab="yarn"
-	yarn add llamaindex @llamaindex/mistral
-	```
-
-	```shell tab="pnpm"
-	pnpm add llamaindex @llamaindex/mistral
-	```
-</Tabs>
-
-## Using another LLM
-
-You can specify what LLM LlamaIndex.TS will use on the `Settings` object, like this:
-
-```typescript
-import { MistralAI } from "@llamaindex/mistral";
-import { Settings } from "llamaindex";
-
-Settings.llm = new MistralAI({
-  model: "mistral-tiny",
-  apiKey: "<YOUR_API_KEY>",
-});
-```
-
-You can see examples of other APIs we support by checking out "Available LLMs" in the sidebar of our [LLMs section](../modules/llms).
-
-## Using another embedding model
-
-A frequent gotcha when trying to use a different API as your LLM is that LlamaIndex will also by default index and embed your data using OpenAI's embeddings. To completely switch away from OpenAI you will need to set your embedding model as well, for example:
-
-```typescript
-import { MistralAIEmbedding } from "@llamaindex/mistral";
-import { Settings } from "llamaindex";
-
-Settings.embedModel = new MistralAIEmbedding();
-```
-
-We support [many different embeddings](../modules/embeddings).
-
-## Full example
-
-This example uses Mistral's `mistral-tiny` model as the LLM and Mistral for embeddings as well.
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/examples/save_load_index.mdx b/apps/next/src/content/docs/llamaindex/examples/save_load_index.mdx
deleted file mode 100644
index bce10b9db..000000000
--- a/apps/next/src/content/docs/llamaindex/examples/save_load_index.mdx
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: Save/Load an Index
----
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/storageContext";
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/examples/summary_index.mdx b/apps/next/src/content/docs/llamaindex/examples/summary_index.mdx
deleted file mode 100644
index 344ce6fe8..000000000
--- a/apps/next/src/content/docs/llamaindex/examples/summary_index.mdx
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: Summary Index
----
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/summaryIndex";
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/examples/vector_index.mdx b/apps/next/src/content/docs/llamaindex/examples/vector_index.mdx
deleted file mode 100644
index 03c16fd95..000000000
--- a/apps/next/src/content/docs/llamaindex/examples/vector_index.mdx
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: Vector Index
----
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/vectorIndex";
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/chatbot.mdx b/apps/next/src/content/docs/llamaindex/getting_started/create_llama.mdx
similarity index 73%
rename from apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/chatbot.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/create_llama.mdx
index 867211893..99260dc66 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/chatbot.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/create_llama.mdx
@@ -1,11 +1,7 @@
 ---
-title: Chatbot tutorial
+title: Create-Llama
 ---
 
-Once you've mastered basic [retrieval-augment generation](retrieval_augmented_generation) you may want to create an interface to chat with your data. You can do this step-by-step, but we recommend getting started quickly using `create-llama`.
-
-## Using create-llama
-
 `create-llama` is a powerful but easy to use command-line tool that generates a working, full-stack web application that allows you to chat with your data. You can learn more about it on [the `create-llama` README page](https://www.npmjs.com/package/create-llama).
 
 Run it once and it will ask you a series of questions about the kind of application you want to generate. Then you can customize your application to suit your use-case. To get started, run:
diff --git a/apps/next/src/content/docs/llamaindex/examples/more_examples.mdx b/apps/next/src/content/docs/llamaindex/getting_started/examples.mdx
similarity index 63%
rename from apps/next/src/content/docs/llamaindex/examples/more_examples.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/examples.mdx
index ef7a02754..31db581bc 100644
--- a/apps/next/src/content/docs/llamaindex/examples/more_examples.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/examples.mdx
@@ -1,10 +1,10 @@
 ---
-title: See all examples
+title: Code examples
 ---
 
 Our GitHub repository has a wealth of examples to explore and try out. You can check out our [examples folder](https://github.com/run-llama/LlamaIndexTS/tree/main/examples) to see them all at once, or browse the pages in this section for some selected highlights.
 
-## Check out all examples
+## Use examples locally
 
 It may be useful to check out all the examples at once so you can try them out locally. To do this into a folder called `my-new-project`, run these commands:
 
@@ -19,3 +19,14 @@ Then you can run any example in the folder with `tsx`, e.g.:
 ```bash npm2yarn
 npx tsx ./vectorIndex.ts
 ```
+
+## Try examples online
+
+You can also try the examples online using StackBlitz:
+
+<iframe
+  className="w-full h-[440px]"
+  aria-label="LlamaIndex.TS Examples"
+  aria-description="This is a list of examples for LlamaIndex.TS."
+  src="https://stackblitz.com/github/run-llama/LlamaIndexTS/tree/main/examples?file=README.md"
+/>
\ No newline at end of file
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/cloudflare.mdx b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/cloudflare.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/getting_started/setup/cloudflare.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/frameworks/cloudflare.mdx
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/index.mdx b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/index.mdx
similarity index 97%
rename from apps/next/src/content/docs/llamaindex/getting_started/setup/index.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/frameworks/index.mdx
index aa4a01253..db7836ab0 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/setup/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/index.mdx
@@ -1,5 +1,5 @@
 ---
-title: Choose Framework
+title: Frameworks
 description: We support multiple JS runtime and frameworks, bundlers.
 ---
 import {
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/frameworks/meta.json b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/meta.json
new file mode 100644
index 000000000..0b6f3902e
--- /dev/null
+++ b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/meta.json
@@ -0,0 +1,6 @@
+{
+  "title": "Framework",
+  "description": "The setup guide",
+  "defaultOpen": true,
+  "pages": ["node", "typescript", "next", "vite", "cloudflare"]
+}
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/next.mdx b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/next.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/getting_started/setup/next.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/frameworks/next.mdx
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/node.mdx b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/node.mdx
similarity index 97%
rename from apps/next/src/content/docs/llamaindex/getting_started/setup/node.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/frameworks/node.mdx
index de48fa881..d9d56c243 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/setup/node.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/node.mdx
@@ -42,7 +42,7 @@ By the default, we are using `js-tiktoken` for tokenization. You can install `gp
 	```
 </Tabs>
 
-> Note: This only works for Node.js
+**Note**: This only works for Node.js
 
 ## TypeScript support
 
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/typescript.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/frameworks/typescript.mdx
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/vite.mdx b/apps/next/src/content/docs/llamaindex/getting_started/frameworks/vite.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/getting_started/setup/vite.mdx
rename to apps/next/src/content/docs/llamaindex/getting_started/frameworks/vite.mdx
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/images/create_llama.png b/apps/next/src/content/docs/llamaindex/getting_started/images/create_llama.png
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/images/create_llama.png
rename to apps/next/src/content/docs/llamaindex/getting_started/images/create_llama.png
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/index.mdx b/apps/next/src/content/docs/llamaindex/getting_started/index.mdx
index 27cf41052..184abdd1e 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/getting_started/index.mdx
@@ -37,20 +37,20 @@ In most cases, you'll also need an LLM package to use LlamaIndex. For example, t
 	```
 </Tabs>
 
-Go to [Using other LLM APIs](/docs/llamaindex/examples/other_llms) to find out how to use other LLMs.
+Go to [LLM APIs](/docs/llamaindex/modules/llms) to find out how to use other LLMs.
 
 
 ## What's next?
 
 <Cards>
 	<Card
-		title="I want to try LlamaIndex.TS"
-		description="Learn how to use LlamaIndex.TS with different JS runtime and frameworks."
-		href="/docs/llamaindex/getting_started/setup"
+		title="Learn LlamaIndex.TS"
+		description="Learn how to use LlamaIndex.TS by starting with one of our tutorials."
+		href="/docs/llamaindex/tutorials/rag"
 	/>
 	<Card
 		title="Show me code examples"
 		description="Explore code examples using LlamaIndex.TS."
-		href="https://stackblitz.com/github/run-llama/LlamaIndexTS/tree/main/examples?file=README.md"
+		href="/docs/llamaindex/getting_started/examples"
 	/>
 </Cards>
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/meta.json b/apps/next/src/content/docs/llamaindex/getting_started/meta.json
index ed2c8903e..62446394b 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/meta.json
+++ b/apps/next/src/content/docs/llamaindex/getting_started/meta.json
@@ -1,4 +1,4 @@
 {
   "title": "Getting Started",
-  "pages": ["index", "setup", "starter_tutorial", "environments", "concepts"]
+  "pages": ["index", "create_llama", "examples", "frameworks"]
 }
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/meta.json b/apps/next/src/content/docs/llamaindex/getting_started/setup/meta.json
deleted file mode 100644
index 2a5e97a34..000000000
--- a/apps/next/src/content/docs/llamaindex/getting_started/setup/meta.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "title": "Setup",
-  "description": "The setup guide",
-  "defaultOpen": true,
-  "pages": ["index", "next", "node", "typescript", "vite", "cloudflare"]
-}
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/meta.json b/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/meta.json
deleted file mode 100644
index 1ea6d9295..000000000
--- a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/meta.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-  "title": "Starter Tutorials",
-  "pages": [
-    "retrieval_augmented_generation",
-    "chatbot",
-    "structured_data_extraction",
-    "agent"
-  ]
-}
diff --git a/apps/next/src/content/docs/llamaindex/guide/cost-analysis.mdx b/apps/next/src/content/docs/llamaindex/guide/cost-analysis.mdx
deleted file mode 100644
index bab0a7523..000000000
--- a/apps/next/src/content/docs/llamaindex/guide/cost-analysis.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: Cost Analysis
----
-
-This page shows how to track LLM cost using APIs.
-
-## Callback Manager
-
-The callback manager is a class that manages the callback functions.
-
-You can register `llm-start`, `llm-end`, and `llm-stream` callbacks to the callback manager for tracking the cost.
-
-import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../examples/recipes/cost-analysis";
-
-<DynamicCodeBlock lang="ts" code={CodeSource} />
diff --git a/apps/next/src/content/docs/llamaindex/guide/meta.json b/apps/next/src/content/docs/llamaindex/guide/meta.json
deleted file mode 100644
index b5d74a767..000000000
--- a/apps/next/src/content/docs/llamaindex/guide/meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  "title": "Guide",
-  "description": "See our guide",
-  "pages": ["loading", "workflow", "chat", "agents", "cost-analysis"]
-}
diff --git a/apps/next/src/content/docs/llamaindex/index.mdx b/apps/next/src/content/docs/llamaindex/index.mdx
index 2ef5f4e88..fe5dc0e41 100644
--- a/apps/next/src/content/docs/llamaindex/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/index.mdx
@@ -16,9 +16,13 @@ The TypeScript implementation is designed for JavaScript server side application
 
 LlamaIndex.TS provides tools for beginners, advanced users, and everyone in between.
 
+Try it out with a starter example using StackBlitz:
+
 <iframe
   className="w-full h-[440px]"
   aria-label="LlamaIndex.TS Starter"
   aria-description="This is a starter example for LlamaIndex.TS, it shows the basic usage of the library."
   src="https://stackblitz.com/github/run-llama/LlamaIndexTS/tree/main/examples?embed=1&file=starter.ts"
 />
+
+You'll need an OpenAI API key to run this example. You can get one from [OpenAI](https://platform.openai.com/api-keys).
\ No newline at end of file
diff --git a/apps/next/src/content/docs/llamaindex/meta.json b/apps/next/src/content/docs/llamaindex/meta.json
index 60e633950..6439cab03 100644
--- a/apps/next/src/content/docs/llamaindex/meta.json
+++ b/apps/next/src/content/docs/llamaindex/meta.json
@@ -4,13 +4,11 @@
   "root": true,
   "pages": [
     "---Guide---",
-    "what-is-llamaindex",
     "index",
     "getting_started",
-    "migration",
-    "guide",
-    "examples",
+    "tutorials",
     "modules",
-    "integration"
+    "integration",
+    "migration"
   ]
 }
diff --git a/apps/next/src/content/docs/llamaindex/modules/agent/index.mdx b/apps/next/src/content/docs/llamaindex/migration/deprecated/agent/index.mdx
similarity index 91%
rename from apps/next/src/content/docs/llamaindex/modules/agent/index.mdx
rename to apps/next/src/content/docs/llamaindex/migration/deprecated/agent/index.mdx
index de6510976..f3c7d6122 100644
--- a/apps/next/src/content/docs/llamaindex/modules/agent/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/migration/deprecated/agent/index.mdx
@@ -2,6 +2,8 @@
 title: Agents
 ---
 
+**Note**: Agents are deprecated, use [Agent Workflows](/docs/llamaindex/modules/agent_workflow) instead.
+
 An “agent” is an automated reasoning and decision engine. It takes in a user input/query and can make internal decisions for executing that query in order to return the correct result. The key agent components can include, but are not limited to:
 
 - Breaking down a complex question into smaller ones
diff --git a/apps/next/src/content/docs/llamaindex/migration/meta.json b/apps/next/src/content/docs/llamaindex/migration/meta.json
index 7be5abb90..19bded9a4 100644
--- a/apps/next/src/content/docs/llamaindex/migration/meta.json
+++ b/apps/next/src/content/docs/llamaindex/migration/meta.json
@@ -1,5 +1,5 @@
 {
   "title": "Migration",
   "description": "Migration between different versions",
-  "pages": ["0.8-to-0.9"]
+  "pages": ["0.8-to-0.9", "deprecated"]
 }
diff --git a/apps/next/src/content/docs/llamaindex/modules/agent_workflow.mdx b/apps/next/src/content/docs/llamaindex/modules/agent_workflow.mdx
index be27b4ebd..27b5d6495 100644
--- a/apps/next/src/content/docs/llamaindex/modules/agent_workflow.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/agent_workflow.mdx
@@ -1,9 +1,9 @@
 ---
-title: Agent Workflow
+title: Agent Workflows
 ---
 
 
-Agent Workflows are a powerful system that enables you to create and orchestrate one or multiple agents with tools to perform specific tasks. It's built on top of the base `Workflow` system and provides a streamlined interface for agent interactions.
+Agent Workflows are a powerful system that enables you to create and orchestrate one or multiple agents with tools to perform specific tasks. It's built on top of the base [`Workflow`](./workflows) system and provides a streamlined interface for agent interactions.
 
 ## Usage
 
diff --git a/apps/next/src/content/docs/llamaindex/guide/chat/chat.mdx b/apps/next/src/content/docs/llamaindex/modules/chat/chat.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/chat/chat.mdx
rename to apps/next/src/content/docs/llamaindex/modules/chat/chat.mdx
diff --git a/apps/next/src/content/docs/llamaindex/guide/chat/install.mdx b/apps/next/src/content/docs/llamaindex/modules/chat/install.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/chat/install.mdx
rename to apps/next/src/content/docs/llamaindex/modules/chat/install.mdx
diff --git a/apps/next/src/content/docs/llamaindex/guide/chat/meta.json b/apps/next/src/content/docs/llamaindex/modules/chat/meta.json
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/chat/meta.json
rename to apps/next/src/content/docs/llamaindex/modules/chat/meta.json
diff --git a/apps/next/src/content/docs/llamaindex/guide/chat/rsc.mdx b/apps/next/src/content/docs/llamaindex/modules/chat/rsc.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/chat/rsc.mdx
rename to apps/next/src/content/docs/llamaindex/modules/chat/rsc.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/deepinfra.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/deepinfra.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/deepinfra.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/deepinfra.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/gemini.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/gemini.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/gemini.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/gemini.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/huggingface.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/huggingface.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/huggingface.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/huggingface.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx
index 2ef16f935..d03054228 100644
--- a/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx
@@ -4,7 +4,7 @@ title: Embedding
 
 The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
 
-This can be explicitly updated through `Settings`
+This can be explicitly updated through `Settings.embedModel`.
 
 ## Installation
 
@@ -35,7 +35,7 @@ Settings.embedModel = new OpenAIEmbedding({
 
 ## Local Embedding
 
-For local embeddings, you can use the [HuggingFace](/docs/llamaindex/modules/embeddings/available_embeddings/huggingface) embedding model.
+For local embeddings, you can use the [HuggingFace](/docs/llamaindex/modules/embeddings/huggingface) embedding model.
 
 ## Local Ollama Embeddings With Remote Host
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/jinaai.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/jinaai.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/jinaai.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/jinaai.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mistral.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/mistral.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mistral.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/mistral.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mixedbreadai.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/mixedbreadai.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mixedbreadai.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/mixedbreadai.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/ollama.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/ollama.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/ollama.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/ollama.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/openai.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/openai.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/openai.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/openai.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/together.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/together.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/together.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/together.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/voyageai.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/voyageai.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/voyageai.mdx
rename to apps/next/src/content/docs/llamaindex/modules/embeddings/voyageai.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/anthropic.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/anthropic.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/anthropic.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/anthropic.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/azure.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/azure.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/azure.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/azure.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/bedrock.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/bedrock.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/bedrock.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/bedrock.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepinfra.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/deepinfra.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepinfra.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/deepinfra.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepseek.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/deepseek.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepseek.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/deepseek.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/fireworks.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/fireworks.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/fireworks.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/fireworks.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/gemini.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/gemini.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/gemini.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/gemini.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/groq.mdx
similarity index 95%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/groq.mdx
index a570a1fef..f8f17619d 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/groq.mdx
@@ -3,7 +3,7 @@ title: Groq
 ---
 
 import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../../../examples/groq.ts";
+import CodeSource from "!raw-loader!../../../../../../../../examples/groq.ts";
 
 ## Installation
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx
index 1ee9b636d..d33a577f1 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx
@@ -45,7 +45,7 @@ export AZURE_OPENAI_DEPLOYMENT="gpt-4" # or some other deployment name
 
 ## Local LLM
 
-For local LLMs, currently we recommend the use of [Ollama](/docs/llamaindex/modules/llms/available_llms/ollama) LLM.
+For local LLMs, currently we recommend the use of [Ollama](/docs/llamaindex/modules/llms/ollama) LLM.
 
 ## Available LLMs
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/llama2.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/llama2.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/llama2.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/llama2.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/mistral.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/mistral.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/mistral.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/mistral.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/ollama.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/ollama.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/ollama.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/ollama.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/openai.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/openai.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/openai.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/openai.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/perplexity.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/perplexity.mdx
similarity index 87%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/perplexity.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/perplexity.mdx
index f9648ac00..c9ae988f0 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/perplexity.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/perplexity.mdx
@@ -1,8 +1,26 @@
 ---
 title: Perplexity LLM
 ---
-## Usage
 
+## Installation
+
+import { Tab, Tabs } from "fumadocs-ui/components/tabs";
+
+<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
+	```shell tab="npm"
+	npm install @llamaindex/perplexity
+	```
+
+	```shell tab="yarn"
+	yarn add @llamaindex/perplexity
+	```
+
+	```shell tab="pnpm"
+	pnpm add @llamaindex/perplexity
+	```
+</Tabs>
+
+## Usage
 
 ```ts
 import { Settings } from "llamaindex";
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/portkey.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/portkey.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/portkey.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/portkey.mdx
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/together.mdx
similarity index 94%
rename from apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx
rename to apps/next/src/content/docs/llamaindex/modules/llms/together.mdx
index a9877cfa3..af3307f30 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/together.mdx
@@ -8,15 +8,15 @@ import { Tab, Tabs } from "fumadocs-ui/components/tabs";
 
 <Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist>
 	```shell tab="npm"
-	npm install llamaindex
+	npm install @llamaindex/together
 	```
 
 	```shell tab="yarn"
-	yarn add llamaindex
+	yarn add @llamaindex/together
 	```
 
 	```shell tab="pnpm"
-	pnpm add llamaindex
+	pnpm add @llamaindex/together
 	```
 </Tabs>
 
diff --git a/apps/next/src/content/docs/llamaindex/guide/loading/index.mdx b/apps/next/src/content/docs/llamaindex/modules/loading/index.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/loading/index.mdx
rename to apps/next/src/content/docs/llamaindex/modules/loading/index.mdx
diff --git a/apps/next/src/content/docs/llamaindex/guide/loading/meta.json b/apps/next/src/content/docs/llamaindex/modules/loading/meta.json
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/loading/meta.json
rename to apps/next/src/content/docs/llamaindex/modules/loading/meta.json
diff --git a/apps/next/src/content/docs/llamaindex/guide/loading/node-parser.mdx b/apps/next/src/content/docs/llamaindex/modules/loading/node-parser.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/loading/node-parser.mdx
rename to apps/next/src/content/docs/llamaindex/modules/loading/node-parser.mdx
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/1_setup.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/1_setup.mdx
similarity index 98%
rename from apps/next/src/content/docs/llamaindex/guide/agents/1_setup.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/1_setup.mdx
index 5a35bd322..e8bab98fd 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/1_setup.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/1_setup.mdx
@@ -1,5 +1,5 @@
 ---
-title: Agent tutorial
+title: 1. Setup
 ---
 
 In this guide we'll walk you through the process of building an Agent in JavaScript using the LlamaIndex.TS library, starting from nothing and adding complexity in stages.
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/2_create_agent.mdx
similarity index 99%
rename from apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/2_create_agent.mdx
index a74832e2f..0914f8a48 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/2_create_agent.mdx
@@ -1,5 +1,5 @@
 ---
-title: Create a basic agent
+title: 2. Create a basic agent
 ---
 
 We want to use `await` so we're going to wrap all of our code in a `main` function, like this:
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/3_local_model.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/3_local_model.mdx
similarity index 97%
rename from apps/next/src/content/docs/llamaindex/guide/agents/3_local_model.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/3_local_model.mdx
index 25b8c01ce..1224356a3 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/3_local_model.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/3_local_model.mdx
@@ -1,5 +1,5 @@
 ---
-title: Using a local model via Ollama
+title: 3. Using a local model via Ollama
 ---
 
 If you're happy using OpenAI, you can skip this section, but many people are interested in using models they run themselves. The easiest way to do this is via the great work of our friends at [Ollama](https://ollama.com/), who provide a simple to use client that will download, install and run a [growing range of models](https://ollama.com/library) for you.
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/4_agentic_rag.mdx
similarity index 71%
rename from apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/4_agentic_rag.mdx
index 76e9fa0fe..d5473554e 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/4_agentic_rag.mdx
@@ -1,5 +1,5 @@
 ---
-title: Adding Retrieval-Augmented Generation (RAG)
+title: 4. Adding Retrieval-Augmented Generation (RAG)
 ---
 
 While an agent that can perform math is nifty (LLMs are usually not very good at math), LLM-based applications are always more interesting when they work with large amounts of data. In this case, we're going to use a 200-page PDF of the proposed budget of the city of San Francisco for fiscal years 2024-2024 and 2024-2025. It's a great example because it's extremely wordy and full of tables of figures, which present a challenge for humans and LLMs alike.
@@ -87,32 +87,9 @@ By default LlamaIndex will retrieve just the 2 most relevant chunks of text. Thi
 retriever.similarityTopK = 10;
 ```
 
-### Approach 1: Create a Context-Aware Agent
+### Use index.queryTool
 
-With the retriever ready, you can create a **context-aware agent**.
-
-```javascript
-const agent = new OpenAIContextAwareAgent({
-  contextRetriever: retriever,
-});
-
-// Example query to the context-aware agent
-let response = await agent.chat({
-  message: `What's the budget of San Francisco in 2023-2024?`,
-});
-
-console.log(response);
-```
-
-**Expected Output:**
-
-```md
-The total budget for the City and County of San Francisco for the fiscal year 2023-2024 is $14.6 billion. This represents a $611.8 million, or 4.4 percent, increase over the previous fiscal year's budget. The budget covers various expenditures across different departments and services, including significant allocations to public works, transportation, commerce, public protection, and health services.
-```
-
-### Approach 2: Using QueryEngineTool (Alternative Approach)
-
-If you prefer more flexibility and don't mind additional complexity, you can create a `QueryEngineTool`. This approach allows you to define the query logic, providing a more tailored way to interact with the data, but note that it introduces a delay due to the extra tool call.
+`index.queryTool` creates a `QueryEngineTool` that can be used by an agent to query data from the index.
 
 ```javascript
 const tools = [
@@ -125,9 +102,9 @@ const tools = [
 ];
 
 // Create an agent using the tools array
-const myAgent = agent({ tools });
+const ragAgent = agent({ tools });
 
-let toolResponse = await myAgent.run("What's the budget of San Francisco in 2023-2024?");
+let toolResponse = await ragAgent.run("What's the budget of San Francisco in 2023-2024?");
 
 console.log(toolResponse);
 ```
@@ -155,10 +132,4 @@ console.log(toolResponse);
 
 Once again we see a `toolResult`. You can see the query the LLM decided to send to the query engine ("total budget"), and the output the engine returned. In `response.message` you see that the LLM has returned the output from the tool almost verbatim, although it trimmed out the bit about 2024-2025 since we didn't ask about that year.
 
-### Comparison of Approaches
-
-The `OpenAIContextAwareAgent` approach simplifies the setup by allowing you to directly link the retriever to the agent, making it straightforward to access relevant context for your queries. This is ideal for situations where you want easy integration with existing data sources, like a context chat engine.
-
-On the other hand, using the `QueryEngineTool` offers more flexibility and power. This method allows for customization in how queries are constructed and executed, enabling you to query data from various storages and process them in different ways. However, this added flexibility comes with increased complexity and response time due to the separate tool call and queryEngine generating tool output by LLM that is then passed to the agent.
-
 So now we have an agent that can index complicated documents and answer questions about them. Let's [combine our math agent and our RAG agent](5_rag_and_tools)!
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/5_rag_and_tools.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/5_rag_and_tools.mdx
similarity index 99%
rename from apps/next/src/content/docs/llamaindex/guide/agents/5_rag_and_tools.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/5_rag_and_tools.mdx
index 0c23b4ae3..971bd33a9 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/5_rag_and_tools.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/5_rag_and_tools.mdx
@@ -1,5 +1,5 @@
 ---
-title: A RAG agent that does math
+title: 5. A RAG agent that does math
 ---
 
 In [our third iteration of the agent](https://github.com/run-llama/ts-agents/blob/main/3_rag_and_tools/agent.ts) we've combined the two previous agents, so we've defined both `sumNumbers` and a `QueryEngineTool` and created an array of two tools. The tools support both Zod and JSON Schema for parameter definition:
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/6_llamaparse.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/6_llamaparse.mdx
similarity index 97%
rename from apps/next/src/content/docs/llamaindex/guide/agents/6_llamaparse.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/6_llamaparse.mdx
index 1eb845b95..fcdf0aa81 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/6_llamaparse.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/6_llamaparse.mdx
@@ -1,5 +1,5 @@
 ---
-title: Adding LlamaParse
+title: 6. Adding LlamaParse
 ---
 
 Complicated PDFs can be very tricky for LLMs to understand. To help with this, LlamaIndex provides LlamaParse, a hosted service that parses complex documents including PDFs. To use it, get a `LLAMA_CLOUD_API_KEY` by [signing up for LlamaCloud](https://cloud.llamaindex.ai/) (it's free for up to 1000 pages/day) and adding it to your `.env` file just as you did for your OpenAI key:
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/7_qdrant.mdx b/apps/next/src/content/docs/llamaindex/tutorials/agents/7_qdrant.mdx
similarity index 98%
rename from apps/next/src/content/docs/llamaindex/guide/agents/7_qdrant.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/7_qdrant.mdx
index eb3c45005..1c0f723b3 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/7_qdrant.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/7_qdrant.mdx
@@ -1,5 +1,5 @@
 ---
-title: Adding persistent vector storage
+title: 7. Adding persistent vector storage
 ---
 
 In the previous examples, we've been loading our data into memory each time we run the agent. This is fine for small datasets, but for larger datasets you'll want to store your embeddings in a database. LlamaIndex.TS provides a `VectorStore` class that can store your embeddings in a variety of databases. We're going to use [Qdrant](https://qdrant.tech/), a popular vector store, for this example.
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/images/agent_flow.png b/apps/next/src/content/docs/llamaindex/tutorials/agents/images/agent_flow.png
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/agents/images/agent_flow.png
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/images/agent_flow.png
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/meta.json b/apps/next/src/content/docs/llamaindex/tutorials/agents/meta.json
similarity index 84%
rename from apps/next/src/content/docs/llamaindex/guide/agents/meta.json
rename to apps/next/src/content/docs/llamaindex/tutorials/agents/meta.json
index 081292241..5579c55f2 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/meta.json
+++ b/apps/next/src/content/docs/llamaindex/tutorials/agents/meta.json
@@ -1,5 +1,5 @@
 {
-  "title": "Agents",
+  "title": "Agent with RAG",
   "pages": [
     "1_setup",
     "2_create_agent",
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/agent.mdx b/apps/next/src/content/docs/llamaindex/tutorials/basic_agent.mdx
similarity index 75%
rename from apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/agent.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/basic_agent.mdx
index ffa84aa94..85377bee5 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/agent.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/basic_agent.mdx
@@ -1,11 +1,11 @@
 ---
-title: Agent tutorial
+title: Basic Agent
 ---
 
 import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../../examples/agent/openai";
+import CodeSource from "!raw-loader!../../../../../../../examples/agent/openai";
 
-We have a comprehensive, step-by-step [guide to building agents in LlamaIndex.TS](../../guides/agents/setup) that we recommend to learn what agents are and how to build them for production. But building a basic agent is simple:
+We have a comprehensive, step-by-step [guide to building agents in LlamaIndex.TS](./agents/1_setup) that we recommend to learn what agents are and how to build them for production. But building a basic agent is simple:
 
 ## Set up
 
diff --git a/apps/next/src/content/docs/llamaindex/examples/local_llm.mdx b/apps/next/src/content/docs/llamaindex/tutorials/local_llm.mdx
similarity index 92%
rename from apps/next/src/content/docs/llamaindex/examples/local_llm.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/local_llm.mdx
index 6faae94cb..abcf2f07b 100644
--- a/apps/next/src/content/docs/llamaindex/examples/local_llm.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/local_llm.mdx
@@ -4,7 +4,7 @@ title: Local LLMs
 
 import { Tab, Tabs } from "fumadocs-ui/components/tabs";
 
-LlamaIndex.TS supports OpenAI and [other remote LLM APIs](other_llms). You can also run a local LLM on your machine!
+LlamaIndex.TS supports OpenAI and [other remote LLM APIs](/docs/llamaindex/modules/llms). You can also run a local LLM on your machine!
 
 ## Using a local model via Ollama
 
@@ -45,7 +45,10 @@ To switch the LLM in your code, you first need to make sure to install the packa
 Then, to tell LlamaIndex to use a local LLM, use the `Settings` object:
 
 ```javascript
-Settings.llm = new Ollama({
+import { Settings } from "llamaindex";
+import { ollama } from "@llamaindex/ollama";
+
+Settings.llm = ollama({
   model: "mixtral:8x7b",
 });
 ```
diff --git a/apps/next/src/content/docs/llamaindex/tutorials/meta.json b/apps/next/src/content/docs/llamaindex/tutorials/meta.json
new file mode 100644
index 000000000..6145b9e27
--- /dev/null
+++ b/apps/next/src/content/docs/llamaindex/tutorials/meta.json
@@ -0,0 +1,12 @@
+{
+  "title": "Tutorials",
+  "pages": [
+    "rag",
+    "basic_agent",
+    "agents",
+    "workflow",
+    "local_llm",
+    "chatbot",
+    "structured_data_extraction"
+  ]
+}
diff --git a/apps/next/src/content/docs/llamaindex/_static/concepts/indexing.jpg b/apps/next/src/content/docs/llamaindex/tutorials/rag/_static/concepts/indexing.jpg
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/_static/concepts/indexing.jpg
rename to apps/next/src/content/docs/llamaindex/tutorials/rag/_static/concepts/indexing.jpg
diff --git a/apps/next/src/content/docs/llamaindex/_static/concepts/querying.jpg b/apps/next/src/content/docs/llamaindex/tutorials/rag/_static/concepts/querying.jpg
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/_static/concepts/querying.jpg
rename to apps/next/src/content/docs/llamaindex/tutorials/rag/_static/concepts/querying.jpg
diff --git a/apps/next/src/content/docs/llamaindex/_static/concepts/rag.jpg b/apps/next/src/content/docs/llamaindex/tutorials/rag/_static/concepts/rag.jpg
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/_static/concepts/rag.jpg
rename to apps/next/src/content/docs/llamaindex/tutorials/rag/_static/concepts/rag.jpg
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/concepts.mdx b/apps/next/src/content/docs/llamaindex/tutorials/rag/concepts.mdx
similarity index 97%
rename from apps/next/src/content/docs/llamaindex/getting_started/concepts.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/rag/concepts.mdx
index 4189c3204..99cc8423e 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/concepts.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/rag/concepts.mdx
@@ -16,7 +16,7 @@ LlamaIndex uses a two stage method when using an LLM with your data:
 1. **indexing stage**: preparing a knowledge base, and
 2. **querying stage**: retrieving relevant context from the knowledge to assist the LLM in responding to a question
 
-![](../_static/concepts/rag.jpg)
+![](./_static/concepts/rag.jpg)
 
 This process is also known as Retrieval Augmented Generation (RAG).
 
@@ -28,7 +28,7 @@ Let's explore each stage in detail.
 
 LlamaIndex.TS help you prepare the knowledge base with a suite of data connectors and indexes.
 
-![](../_static/concepts/indexing.jpg)
+![](./_static/concepts/indexing.jpg)
 
 [**Data Loaders**](/docs/llamaindex/modules/data_loaders/index):
 A data connector (i.e. `Reader`) ingest data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
@@ -54,7 +54,7 @@ LlamaIndex provides composable modules that help you build and integrate RAG pip
 
 These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
 
-![](../_static/concepts/querying.jpg)
+![](./_static/concepts/querying.jpg)
 
 #### Building Blocks
 
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/retrieval_augmented_generation.mdx b/apps/next/src/content/docs/llamaindex/tutorials/rag/index.mdx
similarity index 89%
rename from apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/retrieval_augmented_generation.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/rag/index.mdx
index eef95d0e8..c253efaaf 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/retrieval_augmented_generation.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/rag/index.mdx
@@ -1,12 +1,12 @@
 ---
-title: Retrieval Augmented Generation (RAG) Tutorial
+title: Retrieval Augmented Generation (RAG)
 ---
 
 import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
 import CodeSource from "!raw-loader!../../../../../../../../examples/vectorIndex";
 import TSConfigSource from "!!raw-loader!../../../../../../../../examples/tsconfig.json";
 
-One of the most common use-cases for LlamaIndex is Retrieval-Augmented Generation or RAG, in which your data is indexed and selectively retrieved to be given to an LLM as source material for responding to a query. You can learn more about the [concepts behind RAG](../concepts).
+One of the most common use-cases for LlamaIndex is Retrieval-Augmented Generation or RAG, in which your data is indexed and selectively retrieved to be given to an LLM as source material for responding to a query. You can learn more about the [concepts behind RAG](./rag/concepts).
 
 ## Set up the project
 
@@ -19,7 +19,7 @@ npm install -D typescript @types/node
 
 Then, check out the [installation](../setup) steps to install LlamaIndex.TS and prepare an OpenAI key.
 
-You can use [other LLMs](../../examples/other_llms) via their APIs; if you would prefer to use local models check out our [local LLM example](../../examples/local_llm).
+You can use [other LLMs](/docs/llamaindex/modules/llms) via their APIs; if you would prefer to use local models check out our [local LLM example](./local_llm).
 
 ## Run queries
 
diff --git a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/structured_data_extraction.mdx b/apps/next/src/content/docs/llamaindex/tutorials/structured_data_extraction.mdx
similarity index 80%
rename from apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/structured_data_extraction.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/structured_data_extraction.mdx
index e88d93efe..696ca66e3 100644
--- a/apps/next/src/content/docs/llamaindex/getting_started/starter_tutorial/structured_data_extraction.mdx
+++ b/apps/next/src/content/docs/llamaindex/tutorials/structured_data_extraction.mdx
@@ -1,13 +1,13 @@
 ---
-title: Structured data extraction tutorial
+title: Structured data extraction
 ---
 
 import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock';
-import CodeSource from "!raw-loader!../../../../../../../../examples/jsonExtract";
+import CodeSource from "!raw-loader!../../../../../../../examples/jsonExtract";
 
 Make sure you have installed LlamaIndex.TS and have an OpenAI key. If you haven't, check out the [installation](../setup) guide.
 
-You can use [other LLMs](../../examples/other_llms) via their APIs; if you would prefer to use local models check out our [local LLM example](../../examples/local_llm).
+You can use [other LLMs](/docs/llamaindex/modules/llms) via their APIs; if you would prefer to use local models check out our [local LLM example](./local_llm).
 
 ## Set up
 
diff --git a/apps/next/src/content/docs/llamaindex/guide/workflow/different-inputs-outputs.mdx b/apps/next/src/content/docs/llamaindex/tutorials/workflow/different-inputs-outputs.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/workflow/different-inputs-outputs.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/workflow/different-inputs-outputs.mdx
diff --git a/apps/next/src/content/docs/llamaindex/guide/workflow/index.mdx b/apps/next/src/content/docs/llamaindex/tutorials/workflow/index.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/workflow/index.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/workflow/index.mdx
diff --git a/apps/next/src/content/docs/llamaindex/guide/workflow/meta.json b/apps/next/src/content/docs/llamaindex/tutorials/workflow/meta.json
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/workflow/meta.json
rename to apps/next/src/content/docs/llamaindex/tutorials/workflow/meta.json
diff --git a/apps/next/src/content/docs/llamaindex/guide/workflow/streaming.mdx b/apps/next/src/content/docs/llamaindex/tutorials/workflow/streaming.mdx
similarity index 100%
rename from apps/next/src/content/docs/llamaindex/guide/workflow/streaming.mdx
rename to apps/next/src/content/docs/llamaindex/tutorials/workflow/streaming.mdx
diff --git a/examples/agentworkflow/single-agent.ts b/examples/agentworkflow/single-agent.ts
index ea94e44a1..1e55cc355 100644
--- a/examples/agentworkflow/single-agent.ts
+++ b/examples/agentworkflow/single-agent.ts
@@ -1,36 +1,31 @@
 /**
- * This example shows how to use AgentWorkflow as a single agent with tools
+ * This example shows how to use a single agent with a tool
  */
 import { openai } from "@llamaindex/openai";
-import { Settings, agent } from "llamaindex";
+import { agent } from "llamaindex";
 import { getWeatherTool } from "../agent/utils/tools";
 
-Settings.llm = openai({
-  model: "gpt-4o",
-});
-
-async function singleWeatherAgent() {
-  const workflow = agent({
+async function main() {
+  const weatherAgent = agent({
+    llm: openai({
+      model: "gpt-4o",
+    }),
     tools: [getWeatherTool],
     verbose: false,
   });
 
-  const workflowContext = workflow.run(
-    "What's the weather like in San Francisco?",
-  );
-  const sfResult = await workflowContext;
-  // The weather in San Francisco, CA is currently sunny.
-  console.log(`${JSON.stringify(sfResult, null, 2)}`);
+  // Run the agent and keep the context
+  const context = weatherAgent.run("What's the weather like in San Francisco?");
+  const result = await context;
+  console.log(`${JSON.stringify(result, null, 2)}`);
 
   // Reuse the context from the previous run
-  const workflowContext2 = workflow.run("Compare it with California?", {
-    context: workflowContext.data,
+  const caResult = await weatherAgent.run("Compare it with California?", {
+    context: context.data,
   });
-  const caResult = await workflowContext2;
-  // Both San Francisco and California are currently experiencing sunny weather.
   console.log(`${JSON.stringify(caResult, null, 2)}`);
 }
 
-singleWeatherAgent().catch((error) => {
+main().catch((error) => {
   console.error("Error:", error);
 });
diff --git a/packages/community/README.md b/packages/community/README.md
index 0f108a70d..2051da7ed 100644
--- a/packages/community/README.md
+++ b/packages/community/README.md
@@ -5,8 +5,8 @@
 ## Current Features:
 
 - Bedrock support for Amazon Nova models Pro, Lite and Micro
-- Bedrock support for the Anthropic Claude Models [usage](https://ts.llamaindex.ai/modules/llms/available_llms/bedrock) including the latest Sonnet 3.5 v2 and Haiku 3.5
-- Bedrock support for the Meta LLama 2, 3, 3.1 and 3.2 Models [usage](https://ts.llamaindex.ai/modules/llms/available_llms/bedrock)
+- Bedrock support for the Anthropic Claude Models [usage](https://ts.llamaindex.ai/docs/llamaindex/modules/llms/bedrock) including the latest Sonnet 3.5 v2 and Haiku 3.5
+- Bedrock support for the Meta Llama 2, 3, 3.1 and 3.2 Models [usage](https://ts.llamaindex.ai/docs/llamaindex/modules/llms/bedrock)
 - Meta LLama3.1 405b and Llama3.2 tool call support
 - Meta 3.2 11B and 90B vision support
 - Bedrock support for querying Knowledge Base
-- 
GitLab