From f02621e3790ba70b7e3f47a65058ae613f7ecf27 Mon Sep 17 00:00:00 2001
From: Ihor Pavlenko <my8bit@users.noreply.github.com>
Date: Tue, 4 Feb 2025 04:46:04 +0100
Subject: [PATCH] fix: doc links (#1610)

---
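The guide pages live on disk with numeric prefixes (1_setup.mdx, 2_create_agent.mdx, and so on), but the cross-chapter links used unprefixed slugs such as (local_model), so they pointed at pages that don't exist. This patch rewrites every such link to the prefixed filename.

Below is a minimal sketch of a checker that would catch stale links like these, assuming Node 18+. The file name link-check.ts, the directory constant, and the regex are illustrative only, not part of this patch:

```ts
// link-check.ts (hypothetical): flag relative MDX links that don't resolve
// to a sibling page in the agents guide directory.
import { readdirSync, readFileSync } from "node:fs";
import { join } from "node:path";

const dir = "apps/next/src/content/docs/llamaindex/guide/agents";

// Collect the slugs that actually exist on disk, e.g. "3_local_model".
const pages = new Set(
  readdirSync(dir)
    .filter((f) => f.endsWith(".mdx"))
    .map((f) => f.replace(/\.mdx$/, "")),
);

for (const file of readdirSync(dir)) {
  if (!file.endsWith(".mdx")) continue;
  const text = readFileSync(join(dir, file), "utf8");
  // Match [label](target) where target is a bare slug: no scheme, path,
  // anchor, or extension, so external URLs and asset links are skipped.
  for (const m of text.matchAll(/\[[^\]]*\]\(([^)#/:.]+)\)/g)) {
    if (!pages.has(m[1])) console.log(`${file}: stale link -> ${m[1]}`);
  }
}
```

Run with something like `npx tsx link-check.ts` from the repo root; it prints nothing once every relative link targets an existing page.
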
 .changeset/olive-foxes-watch.md                        |  5 +++++
 .../content/docs/llamaindex/guide/agents/1_setup.mdx   |  4 ++--
 .../docs/llamaindex/guide/agents/2_create_agent.mdx    |  4 ++--
 .../docs/llamaindex/guide/agents/3_local_model.mdx     |  2 +-
 .../docs/llamaindex/guide/agents/4_agentic_rag.mdx     |  2 +-
 .../docs/llamaindex/guide/agents/5_rag_and_tools.mdx   |  2 +-
 .../docs/llamaindex/guide/agents/6_llamaparse.mdx      |  2 +-
 .../content/docs/llamaindex/guide/agents/7_qdrant.mdx  | 10 +++++-----
 8 files changed, 18 insertions(+), 13 deletions(-)
 create mode 100644 .changeset/olive-foxes-watch.md

diff --git a/.changeset/olive-foxes-watch.md b/.changeset/olive-foxes-watch.md
new file mode 100644
index 000000000..3ae39d5bf
--- /dev/null
+++ b/.changeset/olive-foxes-watch.md
@@ -0,0 +1,5 @@
+---
+"@llamaindex/doc": patch
+---
+
+Fix internal links between chapters
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/1_setup.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/1_setup.mdx
index 770fb26c8..022308116 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/1_setup.mdx
+++ b/apps/next/src/content/docs/llamaindex/guide/agents/1_setup.mdx
@@ -20,7 +20,7 @@ npm install llamaindex
 
 ## Choose your model
 
-By default we'll be using OpenAI with GPT-4, as it's a powerful model and easy to get started with. If you'd prefer to run a local model, see [using a local model](local_model).
+By default we'll be using OpenAI with GPT-4, as it's a powerful model and easy to get started with. If you'd prefer to run a local model, see [using a local model](3_local_model).
 
 ## Get an OpenAI API key
 
@@ -36,4 +36,4 @@ We'll use `dotenv` to pull the API key out of that .env file, so also run:
 npm install dotenv
 ```
 
-Now you're ready to [create your agent](create_agent).
+Now you're ready to [create your agent](2_create_agent).
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx
index 909250a2b..860af4573 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx
+++ b/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx
@@ -177,5 +177,5 @@ The second piece of output is the response from the LLM itself, where the `messa
 Great! We've built an agent with tool use! Next you can:
 
 - [See the full code](https://github.com/run-llama/ts-agents/blob/main/1_agent/agent.ts)
-- [Switch to a local LLM](local_model)
-- Move on to [add Retrieval-Augmented Generation to your agent](agentic_rag)
+- [Switch to a local LLM](3_local_model)
+- Move on to [add Retrieval-Augmented Generation to your agent](4_agentic_rag)
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/3_local_model.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/3_local_model.mdx
index 0c649dfe3..0224ff51f 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/3_local_model.mdx
+++ b/apps/next/src/content/docs/llamaindex/guide/agents/3_local_model.mdx
@@ -89,4 +89,4 @@ You can use a ReActAgent instead of an OpenAIAgent in any of the further example
 
 ### Next steps
 
-Now you've got a local agent, you can [add Retrieval-Augmented Generation to your agent](agentic_rag).
+Now you've got a local agent, you can [add Retrieval-Augmented Generation to your agent](4_agentic_rag).
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx
index f5f4432af..465f38299 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx
+++ b/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx
@@ -153,4 +153,4 @@ The `OpenAIContextAwareAgent` approach simplifies the setup by allowing you to d
 
 On the other hand, using the `QueryEngineTool` offers more flexibility and power. This method allows for customization in how queries are constructed and executed, enabling you to query data from various storages and process them in different ways. However, this added flexibility comes with increased complexity and response time due to the separate tool call and queryEngine generating tool output by LLM that is then passed to the agent.
 
-So now we have an agent that can index complicated documents and answer questions about them. Let's [combine our math agent and our RAG agent](rag_and_tools)!
+So now we have an agent that can index complicated documents and answer questions about them. Let's [combine our math agent and our RAG agent](5_rag_and_tools)!
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/5_rag_and_tools.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/5_rag_and_tools.mdx
index 0f95857d2..b68a939a3 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/5_rag_and_tools.mdx
+++ b/apps/next/src/content/docs/llamaindex/guide/agents/5_rag_and_tools.mdx
@@ -127,4 +127,4 @@ In the final tool call, it used the `sumNumbers` function to add the two budgets
 }
 ```
 
-Great! Now let's improve accuracy by improving our parsing with [LlamaParse](llamaparse).
+Great! Now let's improve accuracy by improving our parsing with [LlamaParse](6_llamaparse).
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/6_llamaparse.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/6_llamaparse.mdx
index dc0047add..1eb845b95 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/6_llamaparse.mdx
+++ b/apps/next/src/content/docs/llamaindex/guide/agents/6_llamaparse.mdx
@@ -17,4 +17,4 @@ const documents = await reader.loadData("../data/sf_budget_2023_2024.pdf");
 
 Now you will be able to ask more complicated questions of the same PDF and get better results. You can find this code [in our repo](https://github.com/run-llama/ts-agents/blob/main/4_llamaparse/agent.ts).
 
-Next up, let's persist our embedded data so we don't have to re-parse every time by [using a vector store](qdrant).
+Next up, let's persist our embedded data so we don't have to re-parse every time by [using a vector store](7_qdrant).
diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/7_qdrant.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/7_qdrant.mdx
index d6154c580..eb3c45005 100644
--- a/apps/next/src/content/docs/llamaindex/guide/agents/7_qdrant.mdx
+++ b/apps/next/src/content/docs/llamaindex/guide/agents/7_qdrant.mdx
@@ -65,13 +65,13 @@ Since parsing a PDF can be slow, especially a large one, using the pre-parsed ch
 
 In this guide you've learned how to
 
-- [Create an agent](create_agent)
+- [Create an agent](2_create_agent)
 - Use remote LLMs like GPT-4
-- [Use local LLMs like Mixtral](local_model)
-- [Create a RAG query engine](agentic_rag)
-- [Turn functions and query engines into agent tools](rag_and_tools)
+- [Use local LLMs like Mixtral](3_local_model)
+- [Create a RAG query engine](4_agentic_rag)
+- [Turn functions and query engines into agent tools](5_rag_and_tools)
 - Combine those tools
-- [Enhance your parsing with LlamaParse](llamaparse)
+- [Enhance your parsing with LlamaParse](6_llamaparse)
 - Persist your data in a vector store
 
 The next steps are up to you! Try creating more complex functions and query engines, and set your agent loose on the world.
-- 
GitLab