From 1c13ed2d7e274cffe007dc3ae213b58c50e4b7f4 Mon Sep 17 00:00:00 2001
From: Logan <logan.markewich@live.com>
Date: Sat, 10 Jun 2023 13:46:10 -0600
Subject: [PATCH] Add ChangeLog to the docs (#6409)

---
 CHANGELOG.md                                  |  2 +-
 CONTRIBUTING.md                               |  4 +-
 docs/{README.md => DOCS_README.md}            |  2 +-
 docs/conf.py                                  |  1 +
 docs/development/changelog.rst                |  1 +
 docs/development/contributing.rst             |  1 +
 docs/development/documentation.rst            |  1 +
 docs/how_to/connector/modules.md              | 44 +++++++++----------
 docs/how_to/connector/root.md                 |  2 +-
 docs/how_to/integrations/vector_stores.md     |  2 +-
 .../advanced/query_transformations.md         |  4 +-
 docs/how_to/storage/customization.md          |  2 +-
 docs/index.rst                                |  9 +++-
 .../sub_question_query_engine.rst             |  2 +-
 docs/requirements.txt                         |  1 +
 docs/use_cases/agents.md                      |  2 +-
 docs/use_cases/queries.md                     |  8 ++--
 17 files changed, 50 insertions(+), 38 deletions(-)
 rename docs/{README.md => DOCS_README.md} (98%)
 create mode 100644 docs/development/changelog.rst
 create mode 100644 docs/development/contributing.rst
 create mode 100644 docs/development/documentation.rst

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 96f5a8a1e7..f5239c1833 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,4 @@
-# Changelog
+# ChangeLog
 
 ## [Unreleased]
 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 431cdbe45a..caf8a9ad97 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,4 +1,4 @@
-# 💡 Contributing to LlamaIndex
+# Contributing to LlamaIndex
 Interested in contributing to LlamaIndex? Here's how to get started! 
 
 ## Contribution Guideline
@@ -17,7 +17,7 @@ Also, join our Discord for ideas and discussions: https://discord.gg/dGcwcsnxhU.
 
 ### 1. 🆕 Extend Core Modules
 The most impactful way to contribute to LlamaIndex is extending our core modules:
-![LlamaIndex modules](docs/_static/contribution/contrib.png)
+![LlamaIndex modules](https://github.com/jerryjliu/llama_index/raw/main/docs/_static/contribution/contrib.png)
 
 We welcome contributions in _all_ modules shown above.
 So far, we have implemented a core set of functionalities for each.
diff --git a/docs/README.md b/docs/DOCS_README.md
similarity index 98%
rename from docs/README.md
rename to docs/DOCS_README.md
index bbcf904b8a..4a7df91f0a 100644
--- a/docs/README.md
+++ b/docs/DOCS_README.md
@@ -1,4 +1,4 @@
-# LlamaIndex Documentation
+# Documentation Guide
 
 ## A guide for docs contributors
 
diff --git a/docs/conf.py b/docs/conf.py
index b0bc935218..9ff2ec5084 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -39,6 +39,7 @@ extensions = [
     "sphinx.ext.napoleon",
     "sphinx_rtd_theme",
     "sphinx.ext.mathjax",
+    "m2r2",
     "myst_nb",
 ]
 
diff --git a/docs/development/changelog.rst b/docs/development/changelog.rst
new file mode 100644
index 0000000000..0520a4c2c5
--- /dev/null
+++ b/docs/development/changelog.rst
@@ -0,0 +1 @@
+.. mdinclude:: ../../CHANGELOG.md
\ No newline at end of file
diff --git a/docs/development/contributing.rst b/docs/development/contributing.rst
new file mode 100644
index 0000000000..36431a6a4d
--- /dev/null
+++ b/docs/development/contributing.rst
@@ -0,0 +1 @@
+.. mdinclude:: ../../CONTRIBUTING.md
\ No newline at end of file
diff --git a/docs/development/documentation.rst b/docs/development/documentation.rst
new file mode 100644
index 0000000000..e0b6bc22d9
--- /dev/null
+++ b/docs/development/documentation.rst
@@ -0,0 +1 @@
+.. mdinclude:: ../DOCS_README.md
\ No newline at end of file
diff --git a/docs/how_to/connector/modules.md b/docs/how_to/connector/modules.md
index e3d7ab2e96..f2f9c78f03 100644
--- a/docs/how_to/connector/modules.md
+++ b/docs/how_to/connector/modules.md
@@ -1,30 +1,30 @@
 # Module Guides
 
+
 ```{toctree}
 ---
 maxdepth: 1
 ---
-
-../examples/data_connectors/PsychicDemo.ipynb
-../examples/data_connectors/DeepLakeReader.ipynb
-../examples/data_connectors/QdrantDemo.ipynb
-../examples/data_connectors/DiscordDemo.ipynb
-../examples/data_connectors/MongoDemo.ipynb
-../examples/data_connectors/ChromaDemo.ipynb
-../examples/data_connectors/MyScaleReaderDemo.ipynb
-../examples/data_connectors/FaissDemo.ipynb
-../examples/data_connectors/ObsidianReaderDemo.ipynb
-../examples/data_connectors/SlackDemo.ipynb
-../examples/data_connectors/WebPageDemo.ipynb
-../examples/data_connectors/PineconeDemo.ipynb
-../examples/data_connectors/MboxReaderDemo.ipynb
-../examples/data_connectors/MilvusReaderDemo.ipynb
-../examples/data_connectors/NotionDemo.ipynb
-../examples/data_connectors/GithubRepositoryReaderDemo.ipynb
-../examples/data_connectors/GoogleDocsDemo.ipynb
-../examples/data_connectors/DatabaseReaderDemo.ipynb
-../examples/data_connectors/TwitterDemo.ipynb
-../examples/data_connectors/WeaviateDemo.ipynb
-../examples/data_connectors/MakeDemo.ipynb
+../../examples/data_connectors/PsychicDemo.ipynb
+../../examples/data_connectors/DeepLakeReader.ipynb
+../../examples/data_connectors/QdrantDemo.ipynb
+../../examples/data_connectors/DiscordDemo.ipynb
+../../examples/data_connectors/MongoDemo.ipynb
+../../examples/data_connectors/ChromaDemo.ipynb
+../../examples/data_connectors/MyScaleReaderDemo.ipynb
+../../examples/data_connectors/FaissDemo.ipynb
+../../examples/data_connectors/ObsidianReaderDemo.ipynb
+../../examples/data_connectors/SlackDemo.ipynb
+../../examples/data_connectors/WebPageDemo.ipynb
+../../examples/data_connectors/PineconeDemo.ipynb
+../../examples/data_connectors/MboxReaderDemo.ipynb
+../../examples/data_connectors/MilvusReaderDemo.ipynb
+../../examples/data_connectors/NotionDemo.ipynb
+../../examples/data_connectors/GithubRepositoryReaderDemo.ipynb
+../../examples/data_connectors/GoogleDocsDemo.ipynb
+../../examples/data_connectors/DatabaseReaderDemo.ipynb
+../../examples/data_connectors/TwitterDemo.ipynb
+../../examples/data_connectors/WeaviateDemo.ipynb
+../../examples/data_connectors/MakeDemo.ipynb
 ```
 
diff --git a/docs/how_to/connector/root.md b/docs/how_to/connector/root.md
index 8775af6f2f..0f7db575bc 100644
--- a/docs/how_to/connector/root.md
+++ b/docs/how_to/connector/root.md
@@ -45,5 +45,5 @@ See below for detailed guides.
 ---
 maxdepth: 2
 ---
-modules.md
+modules.rst
 ```
\ No newline at end of file
diff --git a/docs/how_to/integrations/vector_stores.md b/docs/how_to/integrations/vector_stores.md
index d5ef264579..dc95392c41 100644
--- a/docs/how_to/integrations/vector_stores.md
+++ b/docs/how_to/integrations/vector_stores.md
@@ -326,7 +326,7 @@ index = VectorStoreIndex.from_documents(uber_docs, storage_context=storage_conte
 
 ## Loading Data from Vector Stores using Data Connector
 
-LlamaIndex supports oading data from the following sources. See [Data Connectors](/how_to/data_connectors.md) for more details and API documentation.
+LlamaIndex supports loading data from the following sources. See [Data Connectors](../connector/root.md) for more details and API documentation.
 
 Chroma stores both documents and vectors. This is an example of how to use Chroma:
 
diff --git a/docs/how_to/query_engine/advanced/query_transformations.md b/docs/how_to/query_engine/advanced/query_transformations.md
index 81690c7272..a09b8cf46b 100644
--- a/docs/how_to/query_engine/advanced/query_transformations.md
+++ b/docs/how_to/query_engine/advanced/query_transformations.md
@@ -43,7 +43,7 @@ print(response)
 
 ```
 
-Check out our [example notebook](../../examples/query_transformations/HyDEQueryTransformDemo.ipynb) for a full walkthrough.
+Check out our [example notebook](../../../examples/query_transformations/HyDEQueryTransformDemo.ipynb) for a full walkthrough.
 
 
 ### Single-Step Query Decomposition
@@ -55,7 +55,7 @@ If your query is complex, different parts of your knowledge base may answer diff
 
 Our single-step query decomposition feature transforms a **complicated** question into a simpler one over the data collection to help provide a sub-answer to the original question.
 
-This is especially helpful over a [composed graph](/how_to/index_structs/composability.md). Within a composed graph, a query can be routed to multiple subindexes, each representing a subset of the overall knowledge corpus. Query decomposition allows us to transform the query into a more suitable question over any given index.
+This is especially helpful over a [composed graph](../../index/composability.md). Within a composed graph, a query can be routed to multiple subindexes, each representing a subset of the overall knowledge corpus. Query decomposition allows us to transform the query into a more suitable question over any given index.
 
 An example image is shown below.
 
diff --git a/docs/how_to/storage/customization.md b/docs/how_to/storage/customization.md
index ab05c97820..c4d0092a6c 100644
--- a/docs/how_to/storage/customization.md
+++ b/docs/how_to/storage/customization.md
@@ -71,7 +71,7 @@ loaded_indicies = load_index_from_storage(storage_context, index_ids=["<index_id
 You can customize the underlying storage with a one-line change to instantiate different document stores, index stores, and vector stores.
 See [Document Stores](/how_to/storage/docstores.md), [Vector Stores](/how_to/storage/vector_stores.md), [Index Stores](/how_to/storage/index_stores.md) guides for more details.
 
-For saving and loading a graph/composable index, see the [full guide here](/how_to/index_structs/composability.md).
+For saving and loading a graph/composable index, see the [full guide here](../index/composability.md).
 
 ### Vector Store Integrations and Storage
 
diff --git a/docs/index.rst b/docs/index.rst
index cc3024e9f9..8897529a15 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -46,7 +46,6 @@ LlamaIndex provides tools for both beginner users and advanced users. Our high-l
 5 lines of code. Our lower-level APIs allow advanced users to customize and extend any module (data connectors, indices, retrievers, query engines, reranking modules),
 to fit their needs.
 
-
 .. toctree::
    :maxdepth: 1
    :caption: Getting Started
@@ -55,6 +54,14 @@ to fit their needs.
    getting_started/installation.md
    getting_started/starter_example.md
 
+.. toctree::
+   :maxdepth: 2
+   :caption: Development
+   :hidden:
+
+   development/contributing.rst
+   development/documentation.rst
+   development/changelog.rst
 
 .. toctree::
    :maxdepth: 2
diff --git a/docs/reference/query/query_engines/sub_question_query_engine.rst b/docs/reference/query/query_engines/sub_question_query_engine.rst
index ec7d63a024..26a18853c8 100644
--- a/docs/reference/query/query_engines/sub_question_query_engine.rst
+++ b/docs/reference/query/query_engines/sub_question_query_engine.rst
@@ -1,5 +1,5 @@
 Sub Question Query Engine
-=======================
+==========================
 
 .. automodule:: llama_index.query_engine.sub_question_query_engine
    :members:
diff --git a/docs/requirements.txt b/docs/requirements.txt
index f733d7e76c..f493b71a19 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -2,6 +2,7 @@
 sphinx>=4.3.0
 furo>=2023.3.27
 docutils<0.17
+m2r2
 myst-parser
 myst-nb
 sphinx-autobuild
diff --git a/docs/use_cases/agents.md b/docs/use_cases/agents.md
index fb00ba4354..715656474a 100644
--- a/docs/use_cases/agents.md
+++ b/docs/use_cases/agents.md
@@ -49,7 +49,7 @@ Some of these core modules are shown below along with example tutorials (not com
 - [LLM Reranking Guide (Great Gatsby)](/examples/node_postprocessor/LLMReranker-Gatsby.ipynb)
 
 **Chat Engines**
-- [Chat Engines How-To](/how_to/query/chat_engines.md)
+- [Chat Engines How-To](../how_to/chat_engine/root.md)
 
 
 ### Using LlamaIndex as a Tool within an Agent Framework
diff --git a/docs/use_cases/queries.md b/docs/use_cases/queries.md
index 3eb9e89ac1..805c7cab8f 100644
--- a/docs/use_cases/queries.md
+++ b/docs/use_cases/queries.md
@@ -86,7 +86,7 @@ response = query_engine.query("<query_str>")
 ```
 
 **Guides**
-- [Composability](/how_to/index_structs/composability.md)
+- [Composability](../how_to/index/composability.md)
 - [City Analysis](../examples/composable_indices/city_analysis/PineconeDemo-CityAnalysis.ipynb) ([Notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/examples/composable_indices/city_analysis/PineconeDemo-CityAnalysis.ipynb))
 
 
@@ -154,7 +154,7 @@ decompose_transform = DecomposeQueryTransform(
 This module will help break down a complex query into a simpler one over your existing index structure.
 
 **Guides**
-- [Query Transformations](/how_to/query/query_transformations.md)
+- [Query Transformations](../how_to/query_engine/advanced/query_transformations.md)
 - [City Analysis Compare/Contrast Example](../examples//composable_indices/city_analysis/City_Analysis-Decompose.ipynb) ([Notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/examples/composable_indices/city_analysis/City_Analysis-Decompose.ipynb))
 
 You can also rely on the LLM to *infer* whether to perform compare/contrast queries (see Multi-Document Queries below).
@@ -214,7 +214,7 @@ the module will first decompose the query into a simpler initial question "What
 query the index, and then ask followup questions.
 
 **Guides**
-- [Query Transformations](/how_to/query/query_transformations.md)
+- [Query Transformations](../how_to/query_engine/advanced/query_transformations.md)
 - [Multi-Step Query Decomposition](../examples/query_transformations/HyDEQueryTransformDemo.ipynb) ([Notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/examples/query_transformations/HyDEQueryTransformDemo.ipynb))
 
 
@@ -225,7 +225,7 @@ LlamaIndex can support queries that require an understanding of time. It can do
 - Sort by recency and filter outdated context.
 
 **Guides**
-- [Second-Stage Postprocessing Guide](/how_to/query/second_stage.md)
+- [Second-Stage Postprocessing Guide](../how_to/query_engine/advanced/second_stage.md)
 - [Prev/Next Postprocessing](../examples/node_postprocessor/PrevNextPostprocessorDemo.ipynb)
 - [Recency Postprocessing](../examples/node_postprocessor/RecencyPostprocessorDemo.ipynb)
 
-- 
GitLab