diff --git a/llama-index-core/llama_index/core/tools/tool_spec/load_and_search/README.md b/llama-index-core/llama_index/core/tools/tool_spec/load_and_search/README.md
index fad3c0db7b175c721de39790b8cab6f3d7f5be6f..53f2ce8d9ec42aa60eb436abfb515b0fd7fa8688 100644
--- a/llama-index-core/llama_index/core/tools/tool_spec/load_and_search/README.md
+++ b/llama-index-core/llama_index/core/tools/tool_spec/load_and_search/README.md
@@ -1,5 +1,9 @@
 # LoadAndSearch Tool
 
+```bash
+pip install llama-index-tools-wikipedia
+```
+
 This Tool Spec is intended to wrap other tools, allowing the Agent to perform separate loading and reading of data. This is very useful for when tools return information larger than or closer to the size of the context window.
 
 ## Usage
@@ -11,7 +15,7 @@ from llama_index.core.tools.tool_spec.load_and_search import (
     LoadAndSearchToolSpec,
 )
 from llama_index.core.agent import OpenAIAgent
-from llama_hub.tools.wikipedia.base import WikipediaToolSpec
+from llama_index.tools.wikipedia.base import WikipediaToolSpec
 
 wiki_spec = WikipediaToolSpec()
 
diff --git a/llama-index-core/llama_index/core/vector_stores/__init__.py b/llama-index-core/llama_index/core/vector_stores/__init__.py
index c6ce1525becd9199110d4cb60ccd1861526eeecf..713c7f859a1a87f58697457c507d2b69077c87c7 100644
--- a/llama-index-core/llama_index/core/vector_stores/__init__.py
+++ b/llama-index-core/llama_index/core/vector_stores/__init__.py
@@ -1,6 +1,5 @@
 """Vector stores."""
 
-
 from llama_index.core.vector_stores.simple import SimpleVectorStore
 from llama_index.core.vector_stores.types import (
     ExactMatchFilter,
@@ -8,8 +7,10 @@ from llama_index.core.vector_stores.types import (
     FilterOperator,
     MetadataFilter,
     MetadataFilters,
+    MetadataInfo,
     VectorStoreQuery,
     VectorStoreQueryResult,
+    VectorStoreInfo,
 )
 
 __all__ = [
@@ -17,8 +18,10 @@ __all__ = [
     "VectorStoreQueryResult",
     "MetadataFilters",
     "MetadataFilter",
+    "MetadataInfo",
     "ExactMatchFilter",
     "FilterCondition",
     "FilterOperator",
     "SimpleVectorStore",
+    "VectorStoreInfo",
 ]
diff --git a/llama-index-integrations/readers/README.md b/llama-index-integrations/readers/README.md
index 28086dbb4d0887be644bcb7eac2ebf06ba4ca08e..cd6aa2c93d9119e6777c7b06b2c6711b5bb12736 100644
--- a/llama-index-integrations/readers/README.md
+++ b/llama-index-integrations/readers/README.md
@@ -1,15 +1,16 @@
 # Readers (Loaders)
 
-## Reader Usage (Use `download_loader` from LlamaIndex)
+Readers can be installed directly as packages:
 
-You can also use the loaders with `download_loader` from LlamaIndex in a single line of code.
+```bash
+pip install llama-index-readers-google
+```
 
 For example, see the code snippets below using the Google Docs Loader.
 
 ```python
 from llama_index.core import VectorStoreIndex, download_loader
-
-GoogleDocsReader = download_loader("GoogleDocsReader")
+from llama_index.readers.google import GoogleDocsReader
 
 gdoc_ids = ["1wf-y2pd9C878Oh-FmLH7Q_BQkljdm6TQal-c1pUfrec"]
 loader = GoogleDocsReader()
diff --git a/llama-index-integrations/readers/llama-index-readers-agent-search/README.md b/llama-index-integrations/readers/llama-index-readers-agent-search/README.md
index 3d7f9ad2821869e4c8c173a3562c620e685fd1a9..d67611bceac0ac380f4a5255c7dc89adbb874e3f 100644
--- a/llama-index-integrations/readers/llama-index-readers-agent-search/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-agent-search/README.md
@@ -1,5 +1,9 @@
 # AgentSearch Loader
 
+```bash
+pip install llama-index-readers-agent-search
+```
+
+This framework facilitates seamless integration with the AgentSearch dataset or hosted search APIs (e.g. Search Engines) and with RAG-specialized LLMs (e.g. Search Agents).
 
 During query-time, the user passes in the query string, search provider (`bing`, `agent-search`), and RAG provider model (`SciPhi/Sensei-7B-V1`).
@@ -15,9 +19,7 @@ Here's an example usage of the AgentSearchReader.
 # import os
 # os.environ["SCIPHI_API_KEY"] = "..."
 
-from llama_index import download_loader
-
-AgentSearch = download_loader("AgentSearchReader")
+from llama_index.readers.agent_search import AgentSearchReader as AgentSearch
 
 reader = AgentSearch()
 
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-cdk/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-cdk/README.md
index 534cdab3b27df613832dfd989037695137e93e2b..bf621cf2a45d44cb87e51b3552da4f599c59deed 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-cdk/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-cdk/README.md
@@ -1,10 +1,14 @@
 # Airbyte CDK Loader
 
+```bash
+pip install llama-index-readers-airbyte-cdk
+```
+
 The Airbyte CDK Loader is a shim for sources created using the [Airbyte Python CDK](https://docs.airbyte.com/connector-development/cdk-python/). It allows you to load data from any Airbyte source into LlamaIndex.
 
 ## Installation
 
-- Install llama_hub: `pip install llama_hub`
+- Install llama-index reader: `pip install llama-index-readers-airbyte-cdk`
 - Install airbyte-cdk: `pip install airbyte-cdk`
 - Install a source via git (or implement your own): `pip install git+https://github.com/airbytehq/airbyte.git@master#egg=source_github&subdirectory=airbyte-integrations/connectors/source-github`
 
@@ -15,8 +19,7 @@ Implement and import your own source. You can find lots of resources for how to
 Here's an example usage of the AirbyteCdkReader.
 
 ```python
-from llama_index import download_loader
-from llama_hub.airbyte_cdk import AirbyteCDKReader
+from llama_index.readers.airbyte_cdk import AirbyteCDKReader
 from source_github.source import (
     SourceGithub,
 )  # this is just an example, you can use any source here - this one is loaded from the Airbyte Github repo via pip install git+https://github.com/airbytehq/airbyte.git@master#egg=source_github&subdirectory=airbyte-integrations/connectors/source-github`
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-gong/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-gong/README.md
index 94a7c3f130e308dac0af67797fa62c071ba5b44b..20b46ee18e0757dc92b08ecd5060511b760689ea 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-gong/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-gong/README.md
@@ -1,18 +1,17 @@
 # Airbyte Gong Loader
 
-The Airbyte Gong Loader allows you to access different Gong objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-gong
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the gong source: `pip install airbyte-source-gong`
+The Airbyte Gong Loader allows you to access different Gong objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteGongReader.
 
 ```python
-from llama_hub.airbyte_gong import AirbyteGongReader
+from llama_index.readers.airbyte_gong import AirbyteGongReader
 
 gong_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-hubspot/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-hubspot/README.md
index 34fca9a7318d5904f54ee50ecee3b0ee174030ad..29eef9378103010485db05c99ef23293f600fc08 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-hubspot/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-hubspot/README.md
@@ -1,18 +1,17 @@
 # Airbyte Hubspot Loader
 
-The Airbyte Hubspot Loader allows you to access different Hubspot objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-hubspot
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the hubspot source: `pip install airbyte-source-hubspot`
+The Airbyte Hubspot Loader allows you to access different Hubspot objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteHubspotReader.
 
 ```python
-from llama_hub.airbyte_hubspot import AirbyteHubspotReader
+from llama_index.readers.airbyte_hubspot import AirbyteHubspotReader
 
 hubspot_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-salesforce/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-salesforce/README.md
index 375ea08282eea4127948075d2dd2c350d125cfad..7b03d4492a99893161a70c0c65632bdcd66e9cd1 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-salesforce/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-salesforce/README.md
@@ -1,18 +1,17 @@
 # Airbyte Salesforce Loader
 
-The Airbyte Salesforce Loader allows you to access different Salesforce objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-salesforce
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the salesforce source: `pip install airbyte-source-salesforce`
+The Airbyte Salesforce Loader allows you to access different Salesforce objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteSalesforceReader.
 
 ```python
-from llama_hub.airbyte_salesforce import AirbyteSalesforceReader
+from llama_index.readers.airbyte_salesforce import AirbyteSalesforceReader
 
 salesforce_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-shopify/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-shopify/README.md
index 8802120283d4f74604fd71bb3f6a6553a338b453..c2c250b5521e9eb94eeb6b2d593cbf0206da6edc 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-shopify/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-shopify/README.md
@@ -1,18 +1,17 @@
 # Airbyte Shopify Loader
 
-The Airbyte Shopify Loader allows you to access different Shopify objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-shopify
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the shopify source: `pip install airbyte-source-shopify`
+The Airbyte Shopify Loader allows you to access different Shopify objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteShopifyReader.
 
 ```python
-from llama_hub.airbyte_shopify import AirbyteShopifyReader
+from llama_index.readers.airbyte_shopify import AirbyteShopifyReader
 
 shopify_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-stripe/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-stripe/README.md
index 094255a57496959df728a0e643bac6902f1531b2..96b9dfe3de793fcee8f8a5d5c9fa507e98fdb863 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-stripe/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-stripe/README.md
@@ -1,18 +1,17 @@
 # Airbyte Stripe Loader
 
-The Airbyte Stripe Loader allows you to access different Stripe objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-stripe
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the stripe source: `pip install airbyte-source-stripe`
+The Airbyte Stripe Loader allows you to access different Stripe objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteStripeReader.
 
 ```python
-from llama_hub.airbyte_stripe import AirbyteStripeReader
+from llama_index.readers.airbyte_stripe import AirbyteStripeReader
 
 stripe_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-typeform/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-typeform/README.md
index bb9338d260ad32f3fd283c252a8079362b83abb0..a4f7ee97601420bed865fd151e22537c414f7b9c 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-typeform/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-typeform/README.md
@@ -1,18 +1,17 @@
 # Airbyte Typeform Loader
 
-The Airbyte Typeform Loader allows you to access different Typeform objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-typeform
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the typeform source: `pip install airbyte-source-typeform`
+The Airbyte Typeform Loader allows you to access different Typeform objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteTypeformReader.
 
 ```python
-from llama_hub.airbyte_typeform import AirbyteTypeformReader
+from llama_index.readers.airbyte_typeform import AirbyteTypeformReader
 
 typeform_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-zendesk-support/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-zendesk-support/README.md
index 8e9359053e2a94106a129b6e4dffbbe277e134d9..72aa30eb1cce0c46b2ac52b22d40075185baf8e7 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-zendesk-support/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-zendesk-support/README.md
@@ -1,18 +1,19 @@
 # Airbyte ZendeskSupport Loader
 
-The Airbyte ZendeskSupport Loader allows you to access different ZendeskSupport objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-zendesk-support
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the zendesk_support source: `pip install airbyte-source-zendesk-support`
+The Airbyte ZendeskSupport Loader allows you to access different ZendeskSupport objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteZendeskSupportReader.
 
 ```python
-from llama_hub.airbyte_zendesk_support import AirbyteZendeskSupportReader
+from llama_index.readers.airbyte_zendesk_support import (
+    AirbyteZendeskSupportReader,
+)
 
 zendesk_support_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airtable/README.md b/llama-index-integrations/readers/llama-index-readers-airtable/README.md
index 64197e51dbcb4076a0116dbe12d4f1dcd02965a3..ab47b7c07de3888d0e44839c6f9164635d09df07 100644
--- a/llama-index-integrations/readers/llama-index-readers-airtable/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airtable/README.md
@@ -1,5 +1,9 @@
 # Airtable Loader
 
+```bash
+pip install llama-index-readers-airtable
+```
+
 This loader loads documents from Airtable. The user specifies an API token to initialize the AirtableReader. They then specify a `table_id` and a `base_id` to load in the corresponding Document objects.
 
 ## Usage
@@ -7,10 +11,9 @@ This loader loads documents from Airtable. The user specifies an API token to in
 Here's an example usage of the AirtableReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-AirtableReader = download_loader("AirtableReader")
+from llama_index.readers.airtable import AirtableReader
 
 reader = AirtableReader("<Airtable_TOKEN>")
 documents = reader.load_data(table_id="<TABLE_ID>", base_id="<BASE_ID>")
diff --git a/llama-index-integrations/readers/llama-index-readers-apify/README.md b/llama-index-integrations/readers/llama-index-readers-apify/README.md
index e752e540db716bef9ef9572332a89862401177e7..a52ae9787dc5e2afac48ea8ad5c795b391ed7d30 100644
--- a/llama-index-integrations/readers/llama-index-readers-apify/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-apify/README.md
@@ -1,5 +1,9 @@
 # Apify Loaders
 
+```bash
+pip install llama-index-readers-apify
+```
+
 ## Apify Actor Loader
 
 [Apify](https://apify.com/) is a cloud platform for web scraping and data extraction,
@@ -20,8 +24,7 @@ To use this loader, you need to have a (free) Apify account
 and set your [Apify API token](https://console.apify.com/account/integrations) in the code.
 
 ```python
-from llama_index import download_loader
-from llama_index.readers.schema import Document
+from llama_index.core import Document
 
 
 # Converts a single record from the Actor's resulting dataset to the LlamaIndex format
@@ -34,7 +37,7 @@ def tranform_dataset_item(item):
     )
 
 
-ApifyActor = download_loader("ApifyActor")
+from llama_index.readers.apify import ApifyActor
 
 reader = ApifyActor("<My Apify API token>")
 documents = reader.load_data(
@@ -72,8 +75,7 @@ To use this loader, you need to have a (free) Apify account
 and set your [Apify API token](https://console.apify.com/account/integrations) in the code.
 
 ```python
-from llama_index import download_loader
-from llama_index.readers.schema import Document
+from llama_index.core import Document
 
 
 # Converts a single record from the Apify dataset to the LlamaIndex format
@@ -86,7 +88,7 @@ def tranform_dataset_item(item):
     )
 
 
-ApifyDataset = download_loader("ApifyDataset")
+from llama_index.readers.apify import ApifyDataset
 
 reader = ApifyDataset("<Your Apify API token>")
 documents = reader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-arango-db/README.md b/llama-index-integrations/readers/llama-index-readers-arango-db/README.md
index e31489ee1ec9c00459cbf6e6006fd734695b9455..d8fef88eb1f980821118867ab87520e4b1d094c8 100644
--- a/llama-index-integrations/readers/llama-index-readers-arango-db/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-arango-db/README.md
@@ -1,5 +1,9 @@
 # LlamaIndex Readers Integration: Arango Db
 
+```bash
+pip install llama-index-readers-arango-db
+```
+
 This loader loads documents from ArangoDB. The user specifies a ArangoDB instance to
 initialize the reader. They then specify the collection name and query params to
 fetch the relevant docs.
@@ -9,10 +13,9 @@ fetch the relevant docs.
 Here's an example usage of the SimpleArangoDBReader.
 
 ```python
-from llama_index.core.readers import download_loader
 import os
 
-SimpleArangoDBReader = download_loader("SimpleArangoDBReader")
+from llama_index.readers.arango_db import SimpleArangoDBReader
 
 host = "<host>"
 db_name = "<db_name>"
@@ -32,4 +35,4 @@ documents = reader.load_data(
 )
 ```
 
-This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent. See [here](https://github.com/run-llama/llama-hub/tree/main/llama_hub) for examples.
+This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
diff --git a/llama-index-integrations/readers/llama-index-readers-asana/README.md b/llama-index-integrations/readers/llama-index-readers-asana/README.md
index 2bd439c33e365884fc88df2548adaea21f10e487..cbfe43f8290093cd70f78c234c97049825234aef 100644
--- a/llama-index-integrations/readers/llama-index-readers-asana/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-asana/README.md
@@ -1,5 +1,9 @@
 # Asana Loader
 
+```bash
+pip install llama-index-readers-asana
+```
+
 This loader loads documents from Asana. The user specifies an API token to initialize the AsanaReader. They then specify a `workspace_id` OR a `project_id` to load in the corresponding Document objects.
 
 ## Usage
@@ -7,10 +11,9 @@ This loader loads documents from Asana. The user specifies an API token to initi
 Here's an example usage of the AsanaReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-AsanaReader = download_loader("AsanaReader")
+from llama_index.readers.asana import AsanaReader
 
 reader = AsanaReader("<ASANA_TOKEN>")
 
diff --git a/llama-index-integrations/readers/llama-index-readers-assemblyai/README.md b/llama-index-integrations/readers/llama-index-readers-assemblyai/README.md
index e0e7d14cfef64058022f61d65d847dd18d6d000b..36fdd1c65800e20a0f2723f7596a708096ae0a20 100644
--- a/llama-index-integrations/readers/llama-index-readers-assemblyai/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-assemblyai/README.md
@@ -1,5 +1,9 @@
 # AssemblyAI Audio Transcript Loader
 
+```bash
+pip install llama-index-readers-assemblyai
+```
+
+The AssemblyAI Audio Transcript Loader allows you to transcribe audio files with the [AssemblyAI API](https://www.assemblyai.com/) and loads the transcribed text into documents.
 
 To use it, you should have the `assemblyai` python package installed, and the environment variable `ASSEMBLYAI_API_KEY` set with your API key. Alternatively, the API key can also be passed as an argument.
@@ -10,40 +14,12 @@ More info about AssemblyAI:
 - [Get a Free API key](https://www.assemblyai.com/dashboard/signup)
 - [AssemblyAI API Docs](https://www.assemblyai.com/docs)
 
-## Installation
-
-First, you need to install the `assemblyai` python package.
-
-You can find more info about it inside the [assemblyai-python-sdk GitHub repo](https://github.com/AssemblyAI/assemblyai-python-sdk).
-
-```bash
-pip install assemblyai
-```
-
-Optionally: You can install the AssemblyAI integration yourself with:
-
-```bash
-pip install llama-index-readers-assemblyai
-```
-
-Then you can import it with:
-
-```python
-from llama_index.readers.assemblyai import AssemblyAIAudioTranscriptReader
-```
-
-As an alternative, you can also use the `download_loader()` to install and use this integration (see next section).
-
 ## Usage
 
 The `AssemblyAIAudioTranscriptReader` needs at least the `file_path` argument. Audio files can be specified as an URL or a local file path.
 
 ```python
-from llama_index.core import download_loader
-
-AssemblyAIAudioTranscriptReader = download_loader(
-    "AssemblyAIAudioTranscriptReader"
-)
+from llama_index.readers.assemblyai import AssemblyAIAudioTranscriptReader
 
 audio_file = "https://storage.googleapis.com/aai-docs-samples/nbc.mp3"
 # or a local file path: audio_file = "./nbc.mp3"
diff --git a/llama-index-integrations/readers/llama-index-readers-astra-db/README.md b/llama-index-integrations/readers/llama-index-readers-astra-db/README.md
index bd7b4e7cdeff207964b17962b67f7c03790d9ba0..eda6a64ad4ef49f17b1bb8084c6ae25fc4979c5b 100644
--- a/llama-index-integrations/readers/llama-index-readers-astra-db/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-astra-db/README.md
@@ -1,5 +1,9 @@
 # Astra DB Loader
 
+```bash
+pip install llama-index-readers-astra-db
+```
+
 The Astra DB Loader returns a set of documents retrieved from Astra DB.
 The user initializes the loader with an Astra DB index. They then pass in a vector.
 
@@ -10,8 +14,6 @@ Here's an example usage of the AstraDBReader.
 ```python
 from openai import OpenAI
 
-from llama_index import download_loader
-
 
 # Get the credentials for Astra DB
 api_endpoint = "https://324<...>f1c.astra.datastax.com"
@@ -29,7 +31,7 @@ response = client.embeddings.create(
 query_vector = response.data[0].embedding
 
 # Initialize the Reader object
-AstraDBReader = download_loader("AstraDBReader")
+from llama_index.readers.astra_db import AstraDBReader
 
 # Your Astra DB Account will provide you with the endpoint URL and Token
 reader = AstraDBReader(
diff --git a/llama-index-integrations/readers/llama-index-readers-athena/README.md b/llama-index-integrations/readers/llama-index-readers-athena/README.md
index afff8d84f5c64b037e3607928824e19a35325e2c..082e2ca83c151b7ada08d696f4ddb62a2d007045 100644
--- a/llama-index-integrations/readers/llama-index-readers-athena/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-athena/README.md
@@ -1,5 +1,11 @@
 # Athena reader.
 
+```bash
+pip install llama-index-readers-athena
+
+pip install llama-index-llms-openai
+```
+
 Athena reader allow execute SQL with AWS Athena. We using SQLAlchemy and PyAthena under the hood.
 
 ## Permissions
@@ -13,10 +19,10 @@ Here's an example usage of the AthenaReader.
 ```
 import os
 import dotenv
-from llama_index import SQLDatabase,ServiceContext
-from llama_index.indices.struct_store import NLSQLTableQueryEngine
-from llama_index.llms import OpenAI
-from llama_hub.athena import AthenaReader
+from llama_index.core import SQLDatabase, ServiceContext
+from llama_index.core.query_engine import NLSQLTableQueryEngine
+from llama_index.llms.openai import OpenAI
+from llama_index.readers.athena import AthenaReader
 
 dotenv.load_dotenv()
 
diff --git a/llama-index-integrations/readers/llama-index-readers-azcognitive-search/README.md b/llama-index-integrations/readers/llama-index-readers-azcognitive-search/README.md
index 953bef038c961428fff2dd37dbfe9d589d709a41..9891527c4abde817938d1ce4d80dc4f76d7a2ed5 100644
--- a/llama-index-integrations/readers/llama-index-readers-azcognitive-search/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-azcognitive-search/README.md
@@ -1,5 +1,9 @@
 # Azure Cognitive Search Loader
 
+```bash
+pip install llama-index-readers-azcognitive-search
+```
+
 The AzCognitiveSearchReader Loader returns a set of texts corresponding to documents retrieved from specific index of Azure Cognitive Search.
 The user initializes the loader with credentials (service name and key) and the index name.
 
@@ -8,9 +12,7 @@ The user initializes the loader with credentials (service name and key) and the
 Here's an example usage of the AzCognitiveSearchReader.
 
 ```python
-from llama_index import download_loader
-
-AzCognitiveSearchReader = download_loader("AzCognitiveSearchReader")
+from llama_index.readers.azcognitive_search import AzCognitiveSearchReader
 
 reader = AzCognitiveSearchReader(
     "<Azure_Cognitive_Search_NAME>",
@@ -30,11 +32,11 @@ documents = reader.load_data(
 ## Usage in combination with langchain
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
 from langchain.chains.conversation.memory import ConversationBufferMemory
 from langchain.agents import Tool, AgentExecutor, load_tools, initialize_agent
 
-AzCognitiveSearchReader = download_loader("AzCognitiveSearchReader")
+from llama_index.readers.azcognitive_search import AzCognitiveSearchReader
 
 az_loader = AzCognitiveSearchReader(
     COGNITIVE_SEARCH_SERVICE_NAME, COGNITIVE_SEARCH_KEY, INDEX_NAME
diff --git a/llama-index-integrations/readers/llama-index-readers-azstorage-blob/README.md b/llama-index-integrations/readers/llama-index-readers-azstorage-blob/README.md
index 306aec783686a0fa2d19817d2748f61f703d5fac..e4c1adcbfb2c1e17b9d8b0ecce58264febb19eee 100644
--- a/llama-index-integrations/readers/llama-index-readers-azstorage-blob/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-azstorage-blob/README.md
@@ -1,5 +1,9 @@
 # Azure Storage Blob Loader
 
+```bash
+pip install llama-index-readers-azstorage-blob
+```
+
+This loader parses any file stored as an Azure Storage blob or the entire container (with an optional prefix / attribute filter) if no particular file is specified. When initializing `AzStorageBlobReader`, you may pass in your account url with a SAS token or credentials to authenticate.
 
 All files are temporarily downloaded locally and subsequently parsed with `SimpleDirectoryReader`. Hence, you may also specify a custom `file_extractor`, relying on any of the loaders in this library (or your own)! If you need a clue on finding the file extractor object because you'd like to use your own file extractor, follow this sample.
@@ -20,9 +24,7 @@ To use this loader, you need to pass in the name of your Azure Storage Container
 ### Using a Storage Account SAS URL
 
 ```python
-from llama_index import download_loader
-
-AzStorageBlobReader = download_loader("AzStorageBlobReader")
+from llama_index.readers.azstorage_blob import AzStorageBlobReader
 
 loader = AzStorageBlobReader(
     container="scrabble-dictionary",
@@ -38,9 +40,7 @@ documents = loader.load_data()
 The sample below will download all files in a container, by only specifying the storage account's connection string and the container name.
 
 ```python
-from llama_index import download_loader
-
-AzStorageBlobReader = download_loader("AzStorageBlobReader")
+from llama_index.readers.azstorage_blob import AzStorageBlobReader
 
 loader = AzStorageBlobReader(
     container_name="<CONTAINER_NAME>",
@@ -57,12 +57,11 @@ Ensure the Azure Identity library is available `pip install azure-identity`
 The sample below downloads all files in the container using the default credential, alternative credential options are available such as a service principal `ClientSecretCredential`
 
 ```python
-from llama_index import download_loader
 from azure.identity import DefaultAzureCredential
 
 default_credential = DefaultAzureCredential()
 
-AzStorageBlobReader = download_loader("AzStorageBlobReader")
+from llama_index.readers.azstorage_blob import AzStorageBlobReader
 
 loader = AzStorageBlobReader(
     container_name="scrabble-dictionary",
diff --git a/llama-index-integrations/readers/llama-index-readers-bilibili/README.md b/llama-index-integrations/readers/llama-index-readers-bilibili/README.md
index 36bc3b949e0fdc2bd5897c44f24f6d7b56d4260f..66e93880f77164e099b16a5a42928c1b34bfc1a9 100644
--- a/llama-index-integrations/readers/llama-index-readers-bilibili/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-bilibili/README.md
@@ -1,5 +1,9 @@
 # Bilibili Transcript Loader
 
+```bash
+pip install llama-index-readers-bilibili
+```
+
 This loader utilizes the `bilibili_api` to fetch the text transcript from Bilibili, one of the most beloved long-form video sites in China.
 
 With this BilibiliTranscriptReader, users can easily obtain the transcript of their desired video content on the platform.
@@ -9,9 +13,8 @@ With this BilibiliTranscriptReader, users can easily obtain the transcript of th
 To use this loader, you need to pass in an array of Bilibili video links.
 
 ```python
-from llama_index import download_loader
+from llama_index.readers.bilibili import BilibiliTranscriptReader
 
-BilibiliTranscriptReader = download_loader("BilibiliTranscriptReader")
 loader = BilibiliTranscriptReader()
 documents = loader.load_data(
     video_urls=["https://www.bilibili.com/video/BV1yx411L73B/"]
diff --git a/llama-index-integrations/readers/llama-index-readers-bitbucket/README.md b/llama-index-integrations/readers/llama-index-readers-bitbucket/README.md
index a5f4c506593c0d6869e9751e99007780b78d50b6..2edc08dadefc44e9dbef371586cb43ccd5d64cca 100644
--- a/llama-index-integrations/readers/llama-index-readers-bitbucket/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-bitbucket/README.md
@@ -1,5 +1,9 @@
 # Bitbucket Loader
 
+```bash
+pip install llama-index-readers-bitbucket
+```
+
 This loader utilizes the Bitbucket API to load the files inside a Bitbucket repository as Documents in an index.
 
 ## Usage
@@ -8,7 +12,7 @@ To use this loader, you need to provide as environment variables the `BITBUCKET_
 
 ```python
 import os
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
 
 os.environ["BITBUCKET_USERNAME"] = "myusername"
 os.environ["BITBUCKET_API_KEY"] = "myapikey"
@@ -16,7 +20,7 @@ os.environ["BITBUCKET_API_KEY"] = "myapikey"
 base_url = "https://myserver/bitbucket"
 project_key = "mykey"
 
-BitbucketReader = download_loader("BitbucketReader")
+from llama_index.readers.bitbucket import BitbucketReader
 
 loader = BitbucketReader(
     base_url=base_url,
diff --git a/llama-index-integrations/readers/llama-index-readers-boarddocs/README.md b/llama-index-integrations/readers/llama-index-readers-boarddocs/README.md
index d10393b5c8722e5bf98dce3dec659c0d9fb26fad..ce037d0cc14b11697420604b3763691a6dafb089 100644
--- a/llama-index-integrations/readers/llama-index-readers-boarddocs/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-boarddocs/README.md
@@ -1,5 +1,9 @@
 # BoardDocs Loader
 
+```bash
+pip install llama-index-readers-boarddocs
+```
+
 This loader retrieves an agenda and associated material from a BoardDocs site.
 
 This loader is not endorsed by, developed by, supported by, or in any way formally affiliated with Diligent Corporation.
@@ -10,9 +14,7 @@ To use this loader, you'll need to specify which BoardDocs site you want to load
 as well as the committee on the site you want to scrape.
 
 ```python
-from llama_index import download_loader
-
-BoardDocsReader = download_loader("BoardDocsReader")
+from llama_index.readers.boarddocs import BoardDocsReader
 
 # For a site URL https://go.boarddocs.com/ca/redwood/Board.nsf/Public
 # your site should be set to 'ca/redwood'
diff --git a/llama-index-integrations/readers/llama-index-readers-confluence/README.md b/llama-index-integrations/readers/llama-index-readers-confluence/README.md
index e1cf202d4a724a67b97d5993e8f058e335a39e56..4b55e6c1d6d52187c1ce9fac06acec1a59d142c5 100644
--- a/llama-index-integrations/readers/llama-index-readers-confluence/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-confluence/README.md
@@ -1,5 +1,9 @@
 # Confluence Loader
 
+```bash
+pip install llama-index-readers-confluence
+```
+
 This loader loads pages from a given Confluence cloud instance. The user needs to specify the base URL for a Confluence
 instance to initialize the ConfluenceReader - base URL needs to end with `/wiki`. The user can optionally specify
 OAuth 2.0 credentials to authenticate with the Confluence instance. If no credentials are specified, the loader will
@@ -42,7 +46,7 @@ Here's an example usage of the ConfluenceReader.
 
 ```python
 # Example that reads the pages with the `page_ids`
-from llama_hub.confluence import ConfluenceReader
+from llama_index.readers.confluence import ConfluenceReader
 
 token = {"access_token": "<access_token>", "token_type": "<token_type>"}
 oauth2_dict = {"client_id": "<client_id>", "token": token}
@@ -65,7 +69,7 @@ documents.extend(
 
 ```python
 # Example that fetches the first 5, then the next 5 pages from a space
-from llama_hub.confluence import ConfluenceReader
+from llama_index.readers.confluence import ConfluenceReader
 
 token = {"access_token": "<access_token>", "token_type": "<token_type>"}
 oauth2_dict = {"client_id": "<client_id>", "token": token}
@@ -95,7 +99,7 @@ documents.extend(
 
 ```python
-# Example that fetches the first 5 results froma cql query, the uses the cursor to pick up on the next element
+# Example that fetches the first 5 results from a CQL query, then uses the cursor to pick up on the next element
-from llama_hub.confluence import ConfluenceReader
+from llama_index.readers.confluence import ConfluenceReader
 
 token = {"access_token": "<access_token>", "token_type": "<token_type>"}
 oauth2_dict = {"client_id": "<client_id>", "token": token}
diff --git a/llama-index-integrations/readers/llama-index-readers-couchbase/README.md b/llama-index-integrations/readers/llama-index-readers-couchbase/README.md
index 574ba00b2a61b5e687bf0a4923c38b0bac09b88c..f30013bf7a3ff89a01acba257e88b7ef7480d21e 100644
--- a/llama-index-integrations/readers/llama-index-readers-couchbase/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-couchbase/README.md
@@ -1,5 +1,9 @@
 # LlamaIndex Readers Integration: Couchbase
 
+```bash
+pip install llama-index-readers-couchbase
+```
+
 This loader loads documents from Couchbase cluster.
 The user specifies a Couchbase client or credentials to initialize the reader. They can specify the SQL++ query to
 fetch the relevant docs.
@@ -9,10 +13,9 @@ fetch the relevant docs.
 Here's an example usage of the CouchbaseReader.
 
 ```python
-from llama_index.core.readers import download_loader
 import os
 
-CouchbaseLoader = download_loader("CouchbaseReader")
+from llama_index.readers.couchbase import CouchbaseReader
 
 connection_string = (
     "couchbase://localhost"  # valid Couchbase connection string
diff --git a/llama-index-integrations/readers/llama-index-readers-couchdb/README.md b/llama-index-integrations/readers/llama-index-readers-couchdb/README.md
index d738eb39b9ef94346c181af3d5ed74f0f4fb9416..cd3b51286c6c81523498d857402f5386c6879881 100644
--- a/llama-index-integrations/readers/llama-index-readers-couchdb/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-couchdb/README.md
@@ -1,5 +1,9 @@
 # CouchDB Loader
 
+```bash
+pip install llama-index-readers-couchdb
+```
+
 This loader loads documents from CouchDB. The loader currently supports CouchDB 3.x
 using the CouchDB3 python wrapper from https://github.com/n-vlahovic/couchdb3
 The user specifies a CouchDB instance to initialize the reader. They then specify
@@ -10,10 +14,9 @@ the database name and query params to fetch the relevant docs.
 Here's an example usage of the SimpleCouchDBReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-SimpleCouchDBReader = download_loader("SimpleCouchDBReader")
+from llama_index.readers.couchdb import SimpleCouchDBReader
 
 host = "<host>"
 port = "<port>"
diff --git a/llama-index-integrations/readers/llama-index-readers-dad-jokes/README.md b/llama-index-integrations/readers/llama-index-readers-dad-jokes/README.md
index f786ca3b513a3f22cb760fccdc2b4dd1eecd114f..6e07eeffcbda4dfd970c963e7cc18d3de7ee89f5 100644
--- a/llama-index-integrations/readers/llama-index-readers-dad-jokes/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-dad-jokes/README.md
@@ -1,5 +1,9 @@
 # DadJoke Loader
 
+```bash
+pip install llama-index-readers-dad-jokes
+```
+
 This loader fetches a joke from icanhazdadjoke.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader fetches a joke from icanhazdadjoke.
 To use this loader, load it.
 
 ```python
-from llama_index import download_loader
-
-DadJokesReader = download_loader("DadJokesReader")
+from llama_index.readers.dad_jokes import DadJokesReader
 
 loader = DadJokesReader()
 documents = loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-discord/README.md b/llama-index-integrations/readers/llama-index-readers-discord/README.md
index 522c28c3a63f1c8c8be8c9496fade66db2a4c782..c925cf9539d2a5f803db8ce0b7dd45d0bb4816c9 100644
--- a/llama-index-integrations/readers/llama-index-readers-discord/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-discord/README.md
@@ -1,5 +1,9 @@
 # Discord Loader
 
+```bash
+pip install llama-index-readers-discord
+```
+
 This loader loads conversations from Discord. The user specifies `channel_ids` and we fetch conversations from
 those `channel_ids`.
 
@@ -8,10 +12,9 @@ those `channel_ids`.
 Here's an example usage of the DiscordReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-DiscordReader = download_loader("DiscordReader")
+from llama_index.readers.discord import DiscordReader
 
 discord_token = os.getenv("DISCORD_TOKEN")
 channel_ids = [1057178784895348746]  # Replace with your channel_id
diff --git a/llama-index-integrations/readers/llama-index-readers-docugami/README.md b/llama-index-integrations/readers/llama-index-readers-docugami/README.md
index 764a6b603800d58b1b3c985ccf1bdfcf6beb9eb6..e31a22c9eef45116a497d1dcf6fe3089ebc8fa07 100644
--- a/llama-index-integrations/readers/llama-index-readers-docugami/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-docugami/README.md
@@ -1,5 +1,9 @@
 # Docugami Loader
 
+```bash
+pip install llama-index-readers-docugami
+```
+
 This loader takes in IDs of PDF, DOCX or DOC files processed by [Docugami](https://docugami.com) and returns nodes in a Document XML Knowledge Graph for each document. This is a rich representation that includes the semantic and structural characteristics of various chunks in the document as an XML tree. Entire sets of documents are processed, resulting in forests of XML semantic trees.
 
 ## Pre-requisites
@@ -14,9 +18,7 @@ This loader takes in IDs of PDF, DOCX or DOC files processed by [Docugami](https
 To use this loader, you simply need to pass in a Docugami Doc Set ID, and optionally an array of Document IDs (by default, all documents in the Doc Set are loaded).
 
 ```python
-from llama_index.core import download_loader
-
-DocugamiReader = download_loader("DocugamiReader")
+from llama_index.readers.docugami import DocugamiReader
 
 docset_id = "tjwrr2ekqkc3"
 document_ids = ["ui7pkriyckwi", "1be3o7ch10iy"]
diff --git a/llama-index-integrations/readers/llama-index-readers-earnings-call-transcript/README.md b/llama-index-integrations/readers/llama-index-readers-earnings-call-transcript/README.md
index 1eb7b9f24708f764e3163b6be51a2225b0999608..bd5e23b64f52d0733867df4c1920aad88f9575b8 100644
--- a/llama-index-integrations/readers/llama-index-readers-earnings-call-transcript/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-earnings-call-transcript/README.md
@@ -1,5 +1,9 @@
 # EARNING CALL TRANSCRIPTS LOADER
 
+```bash
+pip install llama-index-readers-earnings-call-transcript
+```
+
 This loader fetches the earning call transcripts of US based companies from the website [discountingcashflows.com](https://discountingcashflows.com/). It is not available for commercial purposes
 
 Install the required dependencies
@@ -17,9 +21,7 @@ The Earning call transcripts takes in three arguments
 ## Usage
 
 ```python
-from llama_index import download_loader
-
-EarningsCallTranscript = download_loader("EarningsCallTranscript")
+from llama_index.readers.earnings_call_transcript import EarningsCallTranscript
 
 loader = EarningsCallTranscript(2023, "AAPL", "Q3")
 docs = loader.load_data()
@@ -37,10 +39,9 @@ The metadata of the transcripts are the following
 #### Llama Index
 
 ```python
-from llama_index import download_loader
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
 
-EarningsCallTranscript = download_loader("EarningsCallTranscript")
+from llama_index.readers.earnings_call_transcript import EarningsCallTranscript
 
 loader = EarningsCallTranscript(2023, "AAPL", "Q3")
 docs = loader.load_data()
@@ -57,13 +58,12 @@ print(response)
 #### Langchain
 
 ```python
-from llama_index import download_loader
 from langchain.agents import Tool
 from langchain.agents import initialize_agent
 from langchain.chat_models import ChatOpenAI
 from langchain.llms import OpenAI
 
-EarningsCallTranscript = download_loader("EarningsCallTranscript")
+from llama_index.readers.earnings_call_transcript import EarningsCallTranscript
 
 loader = EarningsCallTranscript(2023, "AAPL", "Q3")
 docs = loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-feedly-rss/README.md b/llama-index-integrations/readers/llama-index-readers-feedly-rss/README.md
index fa201e53b933fc42006fc658dd21a0408c325b6f..3072a55fe6b9c802a88d771b580ff737d40bfed1 100644
--- a/llama-index-integrations/readers/llama-index-readers-feedly-rss/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-feedly-rss/README.md
@@ -1,13 +1,15 @@
 # Feedly Loader
 
+```bash
+pip install llama-index-readers-feedly-rss
+```
+
 This loader fetches the entries from a list of RSS feeds subscribed in [Feedly](https://feedly.com). You must initialize the loader with your [Feedly API token](https://developer.feedly.com), and then pass the category name which you want to extract.
 
 ## Usage
 
 ```python
-from llama_index import download_loader
-
-feedlyRssReader = download_loader("FeedlyRssReader")
+from llama_index.readers.feedly_rss import FeedlyRssReader
 
-loader = feedlyRssReader(bearer_token="[YOUR_TOKEN]")
+loader = FeedlyRssReader(bearer_token="[YOUR_TOKEN]")
 documents = loader.load_data(category_name="news", max_count=100)
diff --git a/llama-index-integrations/readers/llama-index-readers-feishu-docs/README.md b/llama-index-integrations/readers/llama-index-readers-feishu-docs/README.md
index b28f90cfd630223bbd6020ab2332f33cb61ad968..2c378943de1ee091e5d1caf15634f90f6738eb7c 100644
--- a/llama-index-integrations/readers/llama-index-readers-feishu-docs/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-feishu-docs/README.md
@@ -1,5 +1,9 @@
 # Feishu Doc Loader
 
+```bash
+pip install llama-index-readers-feishu-docs
+```
+
-This loader takes in IDs of Feishu Docs and parses their text into `documents`. You can extract a Feishu Doc's ID directly from its URL. For example, the ID of `https://test-csl481dfkgqf.feishu.cn/docx/HIH2dHv21ox9kVxjRuwc1W0jnkf` is `HIH2dHv21ox9kVxjRuwc1W0jnkf`. As a prerequisite, you will need to register with Feishu and build an custom app. See [here](https://open.feishu.cn/document/home/introduction-to-custom-app-development/self-built-application-development-process) for instructions.
+This loader takes in IDs of Feishu Docs and parses their text into `documents`. You can extract a Feishu Doc's ID directly from its URL. For example, the ID of `https://test-csl481dfkgqf.feishu.cn/docx/HIH2dHv21ox9kVxjRuwc1W0jnkf` is `HIH2dHv21ox9kVxjRuwc1W0jnkf`. As a prerequisite, you will need to register with Feishu and build a custom app. See [here](https://open.feishu.cn/document/home/introduction-to-custom-app-development/self-built-application-development-process) for instructions.
 
 ## Usage
@@ -7,12 +11,11 @@ This loader takes in IDs of Feishu Docs and parses their text into `documents`.
 To use this loader, you simply need to pass in an array of Feishu Doc IDs. The default API endpoints are for Feishu, in order to switch to Lark, we should use `set_lark_domain`.
 
 ```python
-from llama_index import download_loader
-
 app_id = "cli_slkdjalasdkjasd"
 app_secret = "dskLLdkasdjlasdKK"
 doc_ids = ["HIH2dHv21ox9kVxjRuwc1W0jnkf"]
-FeishuDocsReader = download_loader("FeishuDocsReader")
+from llama_index.readers.feishu_docs import FeishuDocsReader
+
 loader = FeishuDocsReader(app_id, app_secret)
 documents = loader.load_data(document_ids=doc_ids)
 ```
diff --git a/llama-index-integrations/readers/llama-index-readers-feishu-wiki/README.md b/llama-index-integrations/readers/llama-index-readers-feishu-wiki/README.md
index 5acccf3c8c1221e44c7d04cf8a9df4980d1629d7..1f1b384ba684717ee1bea948931fed16ed51d3ec 100644
--- a/llama-index-integrations/readers/llama-index-readers-feishu-wiki/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-feishu-wiki/README.md
@@ -11,8 +11,6 @@ To use this loader, you need to:
 3. finally, pass your feishu space id to this loader
 
 ```python
-from llama_index import download_loader
-
 app_id = "xxx"
 app_secret = "xxx"
 space_id = "xxx"
diff --git a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/image_deplot/README.md b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/image_deplot/README.md
index 05ba4fe82d25e1dfd62537c9be50174997c9c223..c4869ab48d24ee3ca5f59519bf20a46a06f3715b 100644
--- a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/image_deplot/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/image_deplot/README.md
@@ -1,5 +1,9 @@
 # Image Tabular Chart Loader (Deplot)
 
+```bash
+pip install llama-index-readers-file
+```
+
 This loader captions an image file containing a tabular chart (bar chart, line charts) using deplot.
 
 ## Usage
@@ -8,7 +12,7 @@ To use this loader, you need to pass in a `Path` to a local file.
 
 ```python
 from pathlib import Path
-from llama_hub.file.image_deplot import ImageTabularChartReader
+from llama_index.readers.file import ImageTabularChartReader
 
 loader = ImageTabularChartReader()
 documents = loader.load_data(file=Path("./image.png"))
diff --git a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/paged_csv/README.md b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/paged_csv/README.md
index e46a6d4c9a1d82f582e54004b5b9444395284183..a5a0eae2e09e73d46a96ccfc0a102db6895c6d1a 100644
--- a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/paged_csv/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/paged_csv/README.md
@@ -1,5 +1,9 @@
 # Paged CSV Loader
 
+```bash
+pip install llama-index-readers-file
+```
+
 This loader extracts the text from a local .csv file by formatting each row in an LLM-friendly way and inserting it into a separate Document. A single local file is passed in each time you call `load_data`. For example, a Document might look like:
 
 ```
@@ -15,9 +19,8 @@ To use this loader, you need to pass in a `Path` to a local file.
 
 ```python
 from pathlib import Path
-from llama_index.core.readers import download_loader
 
-PagedCSVReader = download_loader("PagedCSVReader")
+from llama_index.readers.file import PagedCSVReader
 
 loader = PagedCSVReader(encoding="utf-8")
 documents = loader.load_data(file=Path("./transactions.csv"))
diff --git a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/pymu_pdf/README.md b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/pymu_pdf/README.md
index 9abd1e99e3d66a519ed395372a8dcf5b17e40e77..cfde1701d6e3a616d10585dc6b8508a8eef6baad 100644
--- a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/pymu_pdf/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/pymu_pdf/README.md
@@ -1,6 +1,10 @@
 # PyMuPDF Loader
 
-This loader extracts text from a local PDF file using the `PyMuPDF` Python library. This is the fastest among all other PDF parsing options available in `llama_hub`. If `metadata` is passed as True while calling `load` function; extracted documents will include basic metadata such as page numbers, file path and total number of pages in pdf.
+```bash
+pip install llama-index-readers-file
+```
+
+This loader extracts text from a local PDF file using the `PyMuPDF` Python library. If `metadata` is passed as True while calling the `load` function, extracted documents will include basic metadata such as page numbers, file path and total number of pages in pdf.
 
 ## Usage
 
@@ -8,9 +12,8 @@ To use this loader, you need to pass file path of the local file as string or `P
 
 ```python
 from pathlib import Path
-from llama_index import download_loader
 
-PyMuPDFReader = download_loader("PyMuPDFReader")
+from llama_index.readers.file import PyMuPDFReader
 
 loader = PyMuPDFReader()
 documents = loader.load_data(file_path=Path("./article.pdf"), metadata=True)
diff --git a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/unstructured/README.md b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/unstructured/README.md
index 5a59f69ab6a873d64104069945fb710a5a6d210c..a08ad57e710094493ba4f9a0cf53ff6a7512d90c 100644
--- a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/unstructured/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/unstructured/README.md
@@ -1,5 +1,9 @@
 # Unstructured.io File Loader
 
+```bash
+pip install llama-index-readers-file
+```
+
 This loader extracts the text from a variety of unstructured text files using [Unstructured.io](https://github.com/Unstructured-IO/unstructured). Currently, the file extensions that are supported are `.txt`, `.docx`, `.pptx`, `.jpg`, `.png`, `.eml`, `.html`, and `.pdf` documents. A single local file is passed in each time you call `load_data`.
 
 Check out their documentation to see more details, but notably, this enables you to parse the unstructured data of many use-cases. For example, you can download the 10-K SEC filings of public companies (e.g. [Coinbase](https://www.sec.gov/ix?doc=/Archives/edgar/data/0001679788/000167978822000031/coin-20211231.htm)), and feed it directly into this loader without worrying about cleaning up the formatting or HTML tags.
@@ -10,7 +14,7 @@ To use this loader, you need to pass in a `Path` to a local file. Optionally, yo
 
 ```python
 from pathlib import Path
-from llama_hub.file.unstructured import UnstructuredReader
+from llama_index.readers.file import UnstructuredReader
 
 loader = UnstructuredReader()
 documents = loader.load_data(file=Path("./10k_filing.html"))
@@ -20,10 +24,9 @@ You can also easily use this loader in conjunction with `SimpleDirectoryReader`
 
 ```python
 from pathlib import Path
-from llama_index import download_loader
-from llama_index import SimpleDirectoryReader
+from llama_index.core import SimpleDirectoryReader
 
-UnstructuredReader = download_loader("UnstructuredReader")
+from llama_index.readers.file import UnstructuredReader
 
 dir_reader = SimpleDirectoryReader(
     "./data",
diff --git a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/xml/README.md b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/xml/README.md
index c7860ddc0e0ee69bf7fea3b98cbb37648be6022e..1fa813908046d897235092dafd43db1093b2f268 100644
--- a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/xml/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/xml/README.md
@@ -1,5 +1,9 @@
 # XML Loader
 
+```bash
+pip install llama-index-readers-file
+```
+
 This loader extracts the text from a local XML file. A single local file is passed in each time you call `load_data`.
 
 ## Usage
@@ -8,12 +12,11 @@ To use this loader, you need to pass in a `Path` to a local file.
 
 ```python
 from pathlib import Path
-from llama_index import download_loader
 
-XMLReader = download_loader("XMLReader")
+from llama_index.readers.file import XMLReader
 
 loader = XMLReader()
 documents = loader.load_data(file=Path("../example.xml"))
 ```
 
-This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent. See [here](https://github.com/run-llama/llama-hub/tree/main/llama_hub) for examples.
+This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
diff --git a/llama-index-integrations/readers/llama-index-readers-firebase-realtimedb/README.md b/llama-index-integrations/readers/llama-index-readers-firebase-realtimedb/README.md
index ad5ea2f6e4e405354a1959ccaeaa27597e012781..0894b91d8a622b39ed160d50037166430f22bb8c 100644
--- a/llama-index-integrations/readers/llama-index-readers-firebase-realtimedb/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-firebase-realtimedb/README.md
@@ -1,5 +1,9 @@
 # Firebase Realtime Database Loader
 
+```bash
+pip install llama-index-readers-firebase-realtimedb
+```
+
 This loader retrieves documents from Firebase Realtime Database. The user specifies the Firebase Realtime Database URL and, optionally, the path to a service account key file for authentication.
 
 ## Usage
@@ -7,10 +11,8 @@ This loader retrieves documents from Firebase Realtime Database. The user specif
 Here's an example usage of the FirebaseRealtimeDatabaseReader.
 
 ```python
-from llama_index import download_loader
-
-FirebaseRealtimeDatabaseReader = download_loader(
-    "FirebaseRealtimeDatabaseReader"
+from llama_index.readers.firebase_realtimedb import (
+    FirebaseRealtimeDatabaseReader,
 )
 
 database_url = "<database_url>"
@@ -20,4 +22,4 @@ reader = FirebaseRealtimeDatabaseReader(database_url, service_account_key_path)
 documents = reader.load_data(path)
 ```
 
-This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent. See [here](https://github.com/emptycrown/llama-hub/tree/main) for examples.
+This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
diff --git a/llama-index-integrations/readers/llama-index-readers-firestore/README.md b/llama-index-integrations/readers/llama-index-readers-firestore/README.md
index fb3d9b3329018a5d5850986fee988b2cad901277..05cad520aeaeec2554f56876700a2984f6aae2f4 100644
--- a/llama-index-integrations/readers/llama-index-readers-firestore/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-firestore/README.md
@@ -1,5 +1,9 @@
 # Firestore Loader
 
+```bash
+pip install llama-index-readers-firestore
+```
+
 This loader loads from a Firestore collection or a specific document from Firestore. The loader assumes your project already has the google cloud credentials loaded. To find out how to set up credentials, [see here](https://cloud.google.com/docs/authentication/provide-credentials-adc).
 
 ## Usage
@@ -9,9 +13,8 @@ To initialize the loader, provide the project-id of the google cloud project.
 ## Initializing the reader
 
 ```python
-from llama_index import download_loader
+from llama_index.readers.firestore import FirestoreReader
 
-FirestoreReader = download_loader("FirestoreReader")
 reader = FirestoreReader(project_id="<Your Project ID>")
 ```
 
diff --git a/llama-index-integrations/readers/llama-index-readers-genius/README.md b/llama-index-integrations/readers/llama-index-readers-genius/README.md
index cce1ff910532e635a381fc89e4962b6b9c7a6eb4..5ebf06cc786451618146a1956dbf252773d6970c 100644
--- a/llama-index-integrations/readers/llama-index-readers-genius/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-genius/README.md
@@ -1,5 +1,9 @@
 # LlamaIndex Readers Integration: Genius
 
+```bash
+pip install llama-index-readers-genius
+```
+
 This loader connects to the Genius API and loads lyrics, metadata, and album art into `Documents`.
 
 As a prerequisite, you will need to register with [Genius API](https://genius.com/api-clients) and create an app in order to get a `client_id` and a `client_secret`. You should then set a `redirect_uri` for the app. The `redirect_uri` does not need to be functional. You should then generate an access token as an instantiator for the GeniusReader.
@@ -60,9 +64,7 @@ Here's an example usage of the GeniusReader. It will retrieve songs that match s
 - **Returns**: List of `Document` objects with song lyrics.
 
 ```python
-from llama_index.core.readers import download_loader
-
-GeniusReader = download_loader("GeniusReader")
+from llama_index.readers.genius import GeniusReader
 
 access_token = "your_generated_access_token"
 
@@ -79,7 +81,7 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ```python
 from llama_index.core import VectorStoreIndex, download_loader
 
-GeniusReader = download_loader("GeniusReader")
+from llama_index.readers.genius import GeniusReader
 
 access_token = "your_generated_access_token"
 
diff --git a/llama-index-integrations/readers/llama-index-readers-gpt-repo/README.md b/llama-index-integrations/readers/llama-index-readers-gpt-repo/README.md
index 286383911b739562f5f0b878f4833e3f1f7f9bcc..2609090e89be838b7264f7f8f1ccce630cec8e7f 100644
--- a/llama-index-integrations/readers/llama-index-readers-gpt-repo/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-gpt-repo/README.md
@@ -1,5 +1,9 @@
 # GPT Repository Loader
 
+```bash
+pip install llama-index-readers-gpt-repo
+```
+
 This loader is an adaptation of https://github.com/mpoon/gpt-repository-loader
 to LlamaHub. Full credit goes to mpoon for coming up with this!
 
@@ -8,9 +12,7 @@ to LlamaHub. Full credit goes to mpoon for coming up with this!
 To use this loader, you need to pass in a path to a local Git repository
 
 ```python
-from llama_index import download_loader
-
-GPTRepoReader = download_loader("GPTRepoReader")
+from llama_index.readers.gpt_repo import GPTRepoReader
 
 loader = GPTRepoReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-graphdb-cypher/README.md b/llama-index-integrations/readers/llama-index-readers-graphdb-cypher/README.md
index e39cf1bfcf9b478abeb438a6216e4bcf9337e8e1..ee59f64a200d33f10248300492d115d12042f65c 100644
--- a/llama-index-integrations/readers/llama-index-readers-graphdb-cypher/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-graphdb-cypher/README.md
@@ -1,5 +1,9 @@
 # Graph Database Cypher Loader
 
+```bash
+pip install llama-index-readers-graphdb-cypher
+```
+
 This loader populates documents from results of Cypher queries from a Graph database endpoint.
 The user specifies a GraphDB endpoint URL with optional credentials to initialize the reader.
 By declaring the Cypher query and optional parameters the loader can fetch the nested result docs.
@@ -14,10 +18,9 @@ Here's an example usage of the `GraphDBCypherReader`.
 You can test out queries directly with the Neo4j labs demo server: demo.neo4jlabs.com or with a free instance https://neo4j.com/aura
 
 ```python
-from llama_index import download_loader
 import os
 
-GraphDBCypherReader = download_loader("GraphDBCypherReader")
+from llama_index.readers.graphdb_cypher import GraphDBCypherReader
 
 uri = "neo4j+s://demo.neo4jlabs.com"
 username = "stackoverflow"
diff --git a/llama-index-integrations/readers/llama-index-readers-graphql/README.md b/llama-index-integrations/readers/llama-index-readers-graphql/README.md
index 4d779719226a0cc10790a3b03b367a722d98c38c..adc08fb338d6450eba587bf06670d23a496a6b56 100644
--- a/llama-index-integrations/readers/llama-index-readers-graphql/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-graphql/README.md
@@ -1,5 +1,9 @@
 # GraphQL Loader
 
+```bash
+pip install llama-index-readers-graphql
+```
+
 This loader loads documents via GraphQL queries from a GraphQL endpoint.
 The user specifies a GraphQL endpoint URL with optional credentials to initialize the reader.
 By declaring the GraphQL query and optional variables (parameters) the loader can fetch the nested result docs.
@@ -10,10 +14,9 @@ Here's an example usage of the GraphQLReader.
 You can test out queries directly [on the site](https://countries.trevorblades.com/)
 
 ```python
-from llama_index import download_loader
 import os
 
-GraphQLReader = download_loader("GraphQLReader")
+from llama_index.readers.graphql import GraphQLReader
 
 uri = "https://countries.trevorblades.com/"
 headers = {}
diff --git a/llama-index-integrations/readers/llama-index-readers-guru/README.md b/llama-index-integrations/readers/llama-index-readers-guru/README.md
index 147a5ef9e69111c3538e8080adcf3c3108a5b040..d580cc60322fa7a989c90ce04a18d5478966772c 100644
--- a/llama-index-integrations/readers/llama-index-readers-guru/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-guru/README.md
@@ -1,5 +1,9 @@
 # Guru Loader
 
+```bash
+pip install llama-index-readers-guru
+```
+
 This loader loads documents from [Guru](https://www.getguru.com/). The user specifies a username and api key to initialize the GuruReader.
 
 Note this is not your password. You need to create a new api key in the admin tab of the portal.
@@ -9,9 +13,7 @@ Note this is not your password. You need to create a new api key in the admin ta
 Here's an example usage of the GuruReader.
 
 ```python
-from llama_index import download_loader
-
-GuruReader = download_loader("GuruReader")
+from llama_index.readers.guru import GuruReader
 
 reader = GuruReader(username="<GURU_USERNAME>", api_key="<GURU_API_KEY>")
 
diff --git a/llama-index-integrations/readers/llama-index-readers-hatena-blog/README.md b/llama-index-integrations/readers/llama-index-readers-hatena-blog/README.md
index 777749cb6004cc064dd78f5117e8d5ceba9b2322..ef17a2892f64cfbe6eead21594eb26363458d0ec 100644
--- a/llama-index-integrations/readers/llama-index-readers-hatena-blog/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-hatena-blog/README.md
@@ -1,5 +1,9 @@
 # Hatena Blog Loader
 
+```bash
+pip install llama-index-readers-hatena-blog
+```
+
 This loader fetches article from your own [Hatena Blog](https://hatenablog.com/) blog posts using the AtomPub API.
 
 You can get AtomPub info from the admin page after logging into Hatena Blog.
@@ -9,10 +13,9 @@ You can get AtomPub info from the admin page after logging into Hatena Blog.
 Here's an example usage of the HatenaBlogReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-HatenaBlogReader = download_loader("HatenaBlogReader")
+from llama_index.readers.hatena_blog import HatenaBlogReader
 
 root_endpoint = os.getenv("ATOM_PUB_ROOT_ENDPOINT")
 api_key = os.getenv("ATOM_PUB_API_KEY")
diff --git a/llama-index-integrations/readers/llama-index-readers-hive/README.md b/llama-index-integrations/readers/llama-index-readers-hive/README.md
index 39e721c1c02d71136819066d3ed8c0bfe89785bc..dd9be6afe87d59a19c7a275c89f6cfdbb18dc7f5 100644
--- a/llama-index-integrations/readers/llama-index-readers-hive/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-hive/README.md
@@ -1,5 +1,9 @@
 # Hive Loader
 
+```bash
+pip install llama-index-readers-hive
+```
+
 The Hive Loader returns a set of texts corresponding to documents from Hive based on the customized query.
 The user initializes the loader with Hive connection args and then using query to fetch data from Hive.
 
@@ -8,9 +12,7 @@ The user initializes the loader with Hive connection args and then using query t
 Here's an example usage of the hiveReader to load 100 documents.
 
 ```python
-from llama_index import download_loader
-
-HiveReader = download_loader("HiveReader")
+from llama_index.readers.hive import HiveReader
 
 reader = HiveReader(
     host="localhost",
@@ -24,4 +26,4 @@ query = "SELECT * FROM p1 LIMIT 100"
 documents = reader.load_data(query=query)
 ```
 
-This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent. See [here](https://github.com/run-llama/llama-hub/tree/main/llama_hub) for examples.
+This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
diff --git a/llama-index-integrations/readers/llama-index-readers-hubspot/README.md b/llama-index-integrations/readers/llama-index-readers-hubspot/README.md
index d7682e2437a9b79d05717bbf7a8d5789a80a3d3e..0660c3369fc8d4e4e30af0195ff2a6f636951fa0 100644
--- a/llama-index-integrations/readers/llama-index-readers-hubspot/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-hubspot/README.md
@@ -1,5 +1,9 @@
 # Hubspot Loader
 
+```bash
+pip install llama-index-readers-hubspot
+```
+
 This loader loads documents from Hubspot. The user specifies an access token to initialize the HubspotReader.
 
 At the moment, this loader only supports access token authentication. To obtain an access token, you will need to create a private app by following instructions [here](https://developers.hubspot.com/docs/api/private-apps).
@@ -9,10 +13,9 @@ At the moment, this loader only supports access token authentication. To obtain
 Here's an example usage of the HubspotReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-HubspotReader = download_loader("HubspotReader")
+from llama_index.readers.hubspot import HubspotReader
 
 reader = HubspotReader("<HUBSPOT_ACCESS_TOKEN>")
 documents = reader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-huggingface-fs/README.md b/llama-index-integrations/readers/llama-index-readers-huggingface-fs/README.md
index bcfc874039b2486d2f892b5130260f0f442a826d..ebc6ece29e457344cd97c62e666c65b29abb4959 100644
--- a/llama-index-integrations/readers/llama-index-readers-huggingface-fs/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-huggingface-fs/README.md
@@ -1,5 +1,9 @@
 # Hugging Face FS Loader
 
+```bash
+pip install llama-index-readers-huggingface-fs
+```
+
 This loader uses Hugging Face Hub's Filesystem API (> 0.14) to
 load datasets.
 
@@ -12,9 +16,8 @@ To use this loader, you need to pass in a path to a Hugging Face dataset.
 
 ```python
 from pathlib import Path
-from llama_index import download_loader
 
-HuggingFaceFSReader = download_loader("HuggingFaceFSReader")
+from llama_index.readers.huggingface_fs import HuggingFaceFSReader
 
 # load documents
 loader = HuggingFaceFSReader()
diff --git a/llama-index-integrations/readers/llama-index-readers-hwp/README.md b/llama-index-integrations/readers/llama-index-readers-hwp/README.md
index 0f45d30d0103b5782179ecb76961910b2875dd53..a330901c19d5eda04d84a177df19e8ce93c46358 100644
--- a/llama-index-integrations/readers/llama-index-readers-hwp/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-hwp/README.md
@@ -1,5 +1,9 @@
 # HWP Loader
 
+```bash
+pip install llama-index-readers-file
+```
+
 This loader reads the HWP file, which is the format of many official documents in South Korea.
 
 ## Usage
@@ -7,7 +11,7 @@ This loader reads the HWP file, which is the format of many official documents i
 To use this loader, you need to pass in a file name. It's fine whether the file is compressed or not.
 
 ```python
-from llama_hub.hangeul import HWPReader
+from llama_index.readers.file import HWPReader
 from pathlib import Path
 
 hwp_path = Path("/path/to/hwp")
diff --git a/llama-index-integrations/readers/llama-index-readers-imdb-review/README.md b/llama-index-integrations/readers/llama-index-readers-imdb-review/README.md
index 844e67ced77db140842a547dd6bf7f8566bcec81..20d8daf2668a4d3858f749a2bfb89d3de6c72ccf 100644
--- a/llama-index-integrations/readers/llama-index-readers-imdb-review/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-imdb-review/README.md
@@ -1,5 +1,9 @@
 ## IMDB MOVIE REVIEWS LOADER
 
+```bash
+pip install llama-index-readers-imdb-review
+```
+
 This loader fetches all the reviews of a movie or a TV-series from IMDB official site. This loader is working on Windows machine and it requires further debug on Linux. Fixes are on the way
 
 Install the required dependencies
@@ -18,9 +22,7 @@ The IMDB downloader takes in two attributes
 ## Usage
 
 ```python
-from llama_index import download_loader
-
-IMDBReviewsloader = download_loader("IMDBReviews")
+from llama_index.readers.imdb_review import IMDBReviews
 
 loader = IMDBReviews(
     movie_name_year="The Social Network 2010", webdriver_engine="edge"
@@ -47,10 +49,10 @@ This loader can be used with both Langchain and LlamaIndex.
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
-from llama_index import VectorStoreIndex
+from llama_index.core import VectorStoreIndex
+
 
-IMDBReviewsloader = download_loader("IMDBReviews")
+from llama_index.readers.imdb_review import IMDBReviews as IMDBReviewsloader
 
 loader = IMDBReviewsloader(
     movie_name_year="The Social Network 2010",
@@ -72,7 +74,6 @@ print(response)
 ### Langchain
 
 ```python
-from llama_index import download_loader
 from langchain.llms import OpenAI
 from langchain.agents.agent_toolkits.pandas import (
     create_pandas_dataframe_agent,
@@ -81,7 +82,7 @@ from langchain.agents import Tool
 from langchain.agents import initialize_agent
 from langchain.chat_models import ChatOpenAI
 
-IMDBReviewsloader = download_loader("IMDBReviews")
+from llama_index.readers.imdb_review import IMDBReviews as IMDBReviewsloader
 
 loader = IMDBReviewsloader(
     movie_name_year="The Social Network 2010",
diff --git a/llama-index-integrations/readers/llama-index-readers-intercom/README.md b/llama-index-integrations/readers/llama-index-readers-intercom/README.md
index 7c6c7163198a9108780d66f243fee463ea61aef3..f15265703a64e95e2a8ee9fc6ad1661549210578 100644
--- a/llama-index-integrations/readers/llama-index-readers-intercom/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-intercom/README.md
@@ -1,5 +1,9 @@
 # Intercom Loader
 
+```bash
+pip install llama-index-readers-intercom
+```
+
 This loader fetches the text from Intercom help articles using the Intercom API. It also uses the BeautifulSoup library to parse the HTML and extract the text from the articles.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader fetches the text from Intercom help articles using the Intercom API.
 To use this loader, you need to pass in an Intercom account access token.
 
 ```python
-from llama_index import download_loader
-
-IntercomReader = download_loader("IntercomReader")
+from llama_index.readers.intercom import IntercomReader
 
 loader = IntercomReader(intercom_access_token="my_access_token")
 documents = loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-jira/README.md b/llama-index-integrations/readers/llama-index-readers-jira/README.md
index a36d63872b7b6783be425bea625fd0d0474e4f3e..2e76552ba9317a4963415f6615adfbfe2a105add 100644
--- a/llama-index-integrations/readers/llama-index-readers-jira/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-jira/README.md
@@ -1,5 +1,9 @@
 # JIRA Reader
 
+```bash
+pip install llama-index-readers-jira
+```
+
 The Jira loader returns a set of issues based on the query provided to the dataloader.
 We can follow two methods to initialize the loader-
 1- basic_auth -> this takes a dict with the following keys
@@ -21,7 +25,7 @@ You can follow this link for more information regarding Oauth2 -> https://develo
 Here's an example of how to use it
 
 ```python
-from llama_hub.jira import JiraReader
+from llama_index.readers.jira import JiraReader
 
 reader = JiraReader(
     email=email, api_token=api_token, server_url="your-jira-server.com"
@@ -32,9 +36,7 @@ documents = reader.load_data(query="project = <your-project>")
 Alternately, you can also use download_loader from llama_index
 
 ```python
-from llama_index import download_loader
-
-JiraReader = download_loader("JiraReader")
+from llama_index.readers.jira import JiraReader
 
 reader = JiraReader(
     email=email, api_token=api_token, server_url="your-jira-server.com"
diff --git a/llama-index-integrations/readers/llama-index-readers-joplin/README.md b/llama-index-integrations/readers/llama-index-readers-joplin/README.md
index bcd7afb5855cfb9f942398b3e70b50f49112b2e9..c8a14e07fd25b7017652de45f62f72925d02b195 100644
--- a/llama-index-integrations/readers/llama-index-readers-joplin/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-joplin/README.md
@@ -1,5 +1,9 @@
 # Joplin (Markdown) Loader
 
+```bash
+pip install llama-index-readers-joplin
+```
+
 > [Joplin](https://joplinapp.org/) is an open source note-taking app. Capture your thoughts and securely access them from any device.
 
 This readme covers how to load documents from a `Joplin` database.
@@ -20,10 +24,10 @@ An alternative to this approach is to export the `Joplin`'s note database to Mar
 Here's an example usage of the JoplinReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-JoplinReader = download_loader("JoplinReader")
+from llama_index.readers.joplin import JoplinReader
+
 documents = JoplinReader(
     access_token="<access_token>"
 ).load_data()  # Returns list of documents
diff --git a/llama-index-integrations/readers/llama-index-readers-kaltura/README.md b/llama-index-integrations/readers/llama-index-readers-kaltura/README.md
index 6aba072088d86e13482c7d2abd7057f9080f368a..1d5fae146e88d22556b1e7d7812b07d0b6e9cede 100644
--- a/llama-index-integrations/readers/llama-index-readers-kaltura/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-kaltura/README.md
@@ -1,5 +1,9 @@
 # Kaltura eSearch Loader
 
+```bash
+pip install llama-index-readers-kaltura-esearch
+```
+
 This loader reads Kaltura Entries from [Kaltura](https://corp.kaltura.com) based on a Kaltura eSearch API call.
 Search queries can be passed as a pre-defined object of KalturaESearchEntryParams, or through a simple free text query.
 The result is a list of documents containing the Kaltura Entries and Captions json.
@@ -64,9 +68,7 @@ Each dictionary in the response represents a Kaltura media entry, where the keys
 First, instantiate the KalturaReader (aka Kaltura Loader) with your Kaltura configuration credentials:
 
 ```python
-from llama_index import download_loader
-
-KalturaESearchReader = download_loader("KalturaESearchReader")
+from llama_index.readers.kaltura_esearch import KalturaESearchReader
 
 loader = KalturaESearchReader(
     partnerId="INSERT_YOUR_PARTNER_ID",
diff --git a/llama-index-integrations/readers/llama-index-readers-kibela/README.md b/llama-index-integrations/readers/llama-index-readers-kibela/README.md
index 97323863e872c37cfdf8d36bc58aa69bc976300e..d4a39b1a455d4ca0da2fe8256320912507f41565 100644
--- a/llama-index-integrations/readers/llama-index-readers-kibela/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-kibela/README.md
@@ -1,5 +1,9 @@
 # Kibela Reader
 
+```bash
+pip install llama-index-readers-kibela
+```
+
 This reader fetches article from your [Kibela](https://kibe.la/) notes using the GraphQL API.
 
 # Usage
@@ -8,7 +12,7 @@ Here's an example of how to use it. You can get your access token from [here](ht
 
 ```python
 import os
-from llama_hub.kibela import KibelaReader
+from llama_index.readers.kibela import KibelaReader
 
 team = os.environ["KIBELA_TEAM"]
 token = os.environ["KIBELA_TOKEN"]
@@ -21,9 +25,8 @@ Alternately, you can also use download_loader from llama_index
 
 ```python
 import os
-from llama_index import download_loader
 
-KibelaReader = download_loader("KibelaReader")
+from llama_index.readers.kibela import KibelaReader
 
 team = os.environ["KIBELA_TEAM"]
 token = os.environ["KIBELA_TOKEN"]
diff --git a/llama-index-integrations/readers/llama-index-readers-lilac/README.md b/llama-index-integrations/readers/llama-index-readers-lilac/README.md
index 0a396b701a2c75f8cb30317e8fa04482d899c6da..210d23f6bbfce25b88a73daa1b45a192cff5adad 100644
--- a/llama-index-integrations/readers/llama-index-readers-lilac/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-lilac/README.md
@@ -1,5 +1,11 @@
 # Lilac reader
 
+```bash
+pip install llama-index-readers-papers
+
+pip install llama-index-readers-lilac
+```
+
 [Lilac](https://lilacml.com/) is an open-source product that helps you analyze, enrich, and clean unstructured data with AI.
 
 It can be used to analyze, clean, structure, and label data that can be used in downstream LlamaIndex and LangChain applications.
@@ -17,11 +23,10 @@ You can use any LlamaIndex loader to load data into Lilac, clean data, and then
 See [this notebook](https://github.com/lilacai/lilac/blob/main/notebooks/LlamaIndexLoader.ipynb) for getting data into Lilac from LlamaHub.
 
 ```python
-from llama_index import download_loader
 import lilac as ll
 
 # See: https://llamahub.ai/l/papers-arxiv
-ArxivReader = download_loader("ArxivReader")
+from llama_index.readers.papers import ArxivReader
 
 loader = ArxivReader()
 documents = loader.load_data(search_query="au:Karpathy")
@@ -49,9 +54,9 @@ ll.start_server(project_dir="./data")
 ### Lilac => LlamaIndex Documents
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-LilacReader = download_loader("LilacReader")
+from llama_index.readers.lilac import LilacReader
 
 loader = LilacReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-linear/README.md b/llama-index-integrations/readers/llama-index-readers-linear/README.md
index 59e14cdc7184a8a45f2aac093047bd292d188991..3c3a0f97ccafe987a8e95bd8c6b21599c5987101 100644
--- a/llama-index-integrations/readers/llama-index-readers-linear/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-linear/README.md
@@ -1,5 +1,9 @@
 # Linear Reader
 
+```bash
+pip install llama-index-readers-linear
+```
+
 The Linear loader returns issue based on the query.
 
 ## Usage
@@ -7,7 +11,7 @@ The Linear loader returns issue based on the query.
 Here's an example of how to use it
 
 ```python
-from llama_hub.linear import LinearReader
+from llama_index.readers.linear import LinearReader
 
 reader = LinearReader(api_key=api_key)
 query = """
@@ -38,9 +42,7 @@ documents = reader.load_data(query=query)
 Alternately, you can also use download_loader from llama_index
 
 ```python
-from llama_index import download_loader
-
-LinearReader = download_loader("LinearReader")
+from llama_index.readers.linear import LinearReader
 
 reader = LinearReader(api_key=api_key)
 query = """
diff --git a/llama-index-integrations/readers/llama-index-readers-macrometa-gdn/README.md b/llama-index-integrations/readers/llama-index-readers-macrometa-gdn/README.md
index e2fd8a41bef7bdfea4577ce1e932cc788cd517e5..a9fae3033bdcb15fa639bc2fef6bbc2f63b598b8 100644
--- a/llama-index-integrations/readers/llama-index-readers-macrometa-gdn/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-macrometa-gdn/README.md
@@ -1,5 +1,9 @@
 # Macrometa GDN Loader
 
+```bash
+pip install llama-index-readers-macrometa-gdn
+```
+
 This loader takes in a Macrometa federation URL, API key, and collection name and returns a list of vectors.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader takes in a Macrometa federation URL, API key, and collection name an
 To use this loader, you need to pass the URL and API key through the class constructor, and then load the data using an array of collection names.
 
 ```python
-from llama_index import download_loader
-
-MacrometaGDNReader = download_loader("MacrometaGDNReader")
+from llama_index.readers.macrometa_gdn import MacrometaGDNReader
 
 collections = ["test_collection"]
 loader = MacrometaGDNReader(url="https://api-macrometa.io", apikey="test")
diff --git a/llama-index-integrations/readers/llama-index-readers-mangadex/README.md b/llama-index-integrations/readers/llama-index-readers-mangadex/README.md
index 685c59af9f7350cc2f7020eeb653fcf9678af4aa..893aadd6341063ec3c2be90cb188658fe761b5da 100644
--- a/llama-index-integrations/readers/llama-index-readers-mangadex/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-mangadex/README.md
@@ -1,13 +1,15 @@
 # MangaDex Loader
 
+```bash
+pip install llama-index-readers-mangadex
+```
+
 This loader fetches information from the MangaDex API, by manga title.
 
 ## Usage
 
 ```python
-from llama_index import download_loader
-
-MangaDexReader = download_loader("MangaDexReader")
+from llama_index.readers.mangadex import MangaDexReader
 
 loader = MangaDexReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-mangoapps-guides/README.md b/llama-index-integrations/readers/llama-index-readers-mangoapps-guides/README.md
index 87e03688c81049f5dc9be744ccc724c521ab78d7..51f392bd8c66069018014e1f628c9e35c59ff150 100644
--- a/llama-index-integrations/readers/llama-index-readers-mangoapps-guides/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-mangoapps-guides/README.md
@@ -1,5 +1,9 @@
 # MangoppsGuides Loader
 
+```bash
+pip install llama-index-readers-mangoapps-guides
+```
+
 This loader fetches the text from Mangopps Guides.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader fetches the text from Mangopps Guides.
 To use this loader, you need to pass base url of the MangoppsGuides installation (e.g. `https://guides.mangoapps.com/`) and the limit , i.e. max number of links it should crawl
 
 ```python
-from llama_index import download_loader
-
-MangoppsGuidesReader = download_loader("MangoppsGuidesReader")
+from llama_index.readers.mangoapps_guides import MangoppsGuidesReader
 
 loader = MangoppsGuidesReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-maps/README.md b/llama-index-integrations/readers/llama-index-readers-maps/README.md
index 920dfdd45fd0c128b95a17a3a649bf2604ef179c..47f318eb7507706a64141ad38ad1fe2fef2d72ad 100644
--- a/llama-index-integrations/readers/llama-index-readers-maps/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-maps/README.md
@@ -1,5 +1,9 @@
 # **_Osmmap Loader_**
 
+```bash
+pip install llama-index-readers-maps
+```
+
 The Osmmap Loader will fetch map data from the [Overpass](https://wiki.openstreetmap.org/wiki/Main_Page) api for a certain place or area. Version **Overpass API 0.7.60** is used by this loader.
 
 The api will provide you with all the **nodes, relations, and ways** for the particular region when you request data for a region or location.
@@ -27,9 +31,7 @@ She requires all the nodes, routes, and relations within a five-kilometer radius
 ### And the code snippet looks like
 
 ```python
-from llama_index import download_loader
-
-MapReader = download_loader("OpenMap")
+from llama_index.readers.maps import OpenMap as MapReader
 
 loader = MapReader()
 documents = loader.load_data(
@@ -46,9 +48,7 @@ documents = loader.load_data(
 - so she search for hospital tag in the [Taginfo](https://taginfo.openstreetmap.org/tags) and she got
 
 ```python
-from llama_index import download_loader
-
-MapReader = download_loader("OpenMap")
+from llama_index.readers.maps import OpenMap as MapReader
 
 loader = MapReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-memos/README.md b/llama-index-integrations/readers/llama-index-readers-memos/README.md
index 85dfd31c084307d886fd089a23a9b9a648a8b9f1..849e4dd1fedcb2c11aa4ea7f12f6e834e68947df 100644
--- a/llama-index-integrations/readers/llama-index-readers-memos/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-memos/README.md
@@ -1,5 +1,9 @@
 # Memos Loader
 
+```bash
+pip install llama-index-readers-memos
+```
+
 This loader fetches text from self-hosted [memos](https://github.com/usememos/memos).
 
 ## Usage
@@ -7,9 +11,8 @@ This loader fetches text from self-hosted [memos](https://github.com/usememos/me
 To use this loader, you need to specify the host where memos is deployed. If you need to filter, pass the [corresponding parameter](https://github.com/usememos/memos/blob/4fe8476169ecd2fc4b164a25611aae6861e36812/api/memo.go#L76) in `load_data`.
 
 ```python
-from llama_index import download_loader
+from llama_index.readers.memos import MemosReader
 
-MemosReader = download_loader("MemosReader")
 loader = MemosReader("https://demo.usememos.com/")
 documents = loader.load_data({"creatorId": 101})
 ```
diff --git a/llama-index-integrations/readers/llama-index-readers-microsoft-onedrive/README.md b/llama-index-integrations/readers/llama-index-readers-microsoft-onedrive/README.md
index e2df15698bcaae60245c3f631bac75bae1909ba5..05a43f3de03d43b977ed5e3a663c7d5bce221a13 100644
--- a/llama-index-integrations/readers/llama-index-readers-microsoft-onedrive/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-microsoft-onedrive/README.md
@@ -1,5 +1,9 @@
 # Microsoft OneDrive Loader
 
+```bash
+pip install llama-index-readers-microsoft-onedrive
+```
+
 This loader reads files from:
 
 - Microsoft OneDrive Personal [(https://onedrive.live.com/)](https://onedrive.live.com/) and
@@ -61,9 +65,7 @@ For example, the file_id of `https://onedrive.live.com/?cid=0B5AF52BE769DFDE4&id
 #### OneDrive Personal Example Usage:
 
 ```python
-from llama_index import download_loader
-
-OneDriveReader = download_loader("OneDriveReader")
+from llama_index.readers.microsoft_onedrive import OneDriveReader
 
 # User Authentication flow: Replace client id with your own id
 loader = OneDriveReader(client_id="82ee706e-2439-47fa-877a-95048ead9318")
@@ -108,9 +110,7 @@ For example, the path of file "demo_doc.docx" within test subfolder from previou
 #### OneDrive For Business Example Usage:
 
 ```python
-from llama_index import download_loader
-
-OneDriveReader = download_loader("OneDriveReader")
+from llama_index.readers.microsoft_onedrive import OneDriveReader
 
 loader = OneDriveReader(
     client_id="82ee706e-2439-47fa-877a-95048ead9318",
diff --git a/llama-index-integrations/readers/llama-index-readers-microsoft-outlook/README.md b/llama-index-integrations/readers/llama-index-readers-microsoft-outlook/README.md
index 3869c7a6cd386e13f80886a67a96b4363c986c98..c2f556fb67155a1c436ace80271f3439210fd55f 100644
--- a/llama-index-integrations/readers/llama-index-readers-microsoft-outlook/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-microsoft-outlook/README.md
@@ -1,5 +1,9 @@
 # Outlook Local Calendar Loader
 
+```bash
+pip install llama-index-readers-microsoft-outlook
+```
+
 This loader reads your past and upcoming Calendar events from your local Outlook .ost or .pst and parses the relevant info into `Documents`.
 
 It runs on Windows only and has only been tested with Windows 11. It has been designed to have a supoerset of the functionality of the Google Calendar reader.
@@ -11,9 +15,7 @@ Here's an example usage of the OutlookCalendar Reader. It will retrieve up to 10
 It always returns Start, End, Subject, Location, and Organizer attributes and optionally returns additional attributes specified in the `more_attributes` parameter, which, if specified, must be a list of strings eg. ['Body','someotherattribute',...]. Attributes which don't exist in a calendar entry are ignored without warning.
 
 ```python
-from llama_index import download_loader
-
-OutlookCalendarReader = download_loader("OutlookLocalCalendarReader")
+from llama_index.readers.microsoft_outlook import OutlookLocalCalendarReader as OutlookCalendarReader
 
 loader = OutlookCalendarReader()
 documents = loader.load_data()
@@ -26,9 +28,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-OutlookCalendarReader = download_loader("OutlookLocalCalendarReader")
+from llama_index.readers.microsoft_outlook import OutlookLocalCalendarReader as OutlookCalendarReader
 
 loader = OutlookCalendarReader(
     start_date="2022-01-01", number_of_documents=1000
diff --git a/llama-index-integrations/readers/llama-index-readers-microsoft-sharepoint/README.md b/llama-index-integrations/readers/llama-index-readers-microsoft-sharepoint/README.md
index e382b0f0131944f52d1b23c00eccb8b9a0c0b663..9ea404eae410fd2f1ac940bbf5129a11e27469b9 100644
--- a/llama-index-integrations/readers/llama-index-readers-microsoft-sharepoint/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-microsoft-sharepoint/README.md
@@ -1,5 +1,9 @@
 # Microsoft SharePoint Reader
 
+```bash
+pip install llama-index-readers-microsoft-sharepoint
+```
+
 The loader loads the files from a folder in sharepoint site.
 
 It also supports traversing recursively through the sub-folders.
@@ -27,9 +31,7 @@ If the files are present in the `Test` folder in SharePoint Site under `root` di
 ![FilePath](file_path_info.png)
 
 ```python
-from llama_index import download_loader
-
-SharePointLoader = download_loader("SharePointReader")
+from llama_index.readers.microsoft_sharepoint import SharePointReader as SharePointLoader
 
 loader = SharePointLoader(
     client_id="<Client ID of the app>",
diff --git a/llama-index-integrations/readers/llama-index-readers-minio/README.md b/llama-index-integrations/readers/llama-index-readers-minio/README.md
index d0bb5d20dd2d13cda46ddf991fffc178f5a661dd..6fa3d65e706ce052a68828513ec681f69b2a035a 100644
--- a/llama-index-integrations/readers/llama-index-readers-minio/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-minio/README.md
@@ -6,4 +6,4 @@
 
 ## Import
 
-`from llama_index.readers.minio import MinioReader, BotoMinioReader`
+`from llama_index.readers.minio import MinioReader, BotoMinioReader`
diff --git a/llama-index-integrations/readers/llama-index-readers-minio/llama_index/README.md b/llama-index-integrations/readers/llama-index-readers-minio/llama_index/README.md
index e17e81f4d6bcc95892ee36bbb0a05cafd3aab2b2..3ae38a238236b8707ad9d9b415540fdc2ef79c13 100644
--- a/llama-index-integrations/readers/llama-index-readers-minio/llama_index/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-minio/llama_index/README.md
@@ -13,8 +13,6 @@ To use this loader, you need to pass in the name of your Minio Bucket. After tha
 Otherwise, you may specify a prefix if you only want to parse certain files in the Bucket, or a subdirectory.
 
 ```python
-from llama_index import download_loader
-
 MinioReader = download_loader("BotoMinioReader")
 loader = MinioReader(
     bucket="documents",
@@ -40,8 +38,6 @@ Otherwise, you may specify a prefix if you only want to parse certain files in t
 You can now use the client with a TLS-secured MinIO instance (`minio_secure=True`), even if server's certificate isn't trusted (`minio_cert_check=False`).
 
 ```python
-from llama_index import download_loader
-
 MinioReader = download_loader("MinioReader")
 loader = MinioReader(
     bucket="documents",
diff --git a/llama-index-integrations/readers/llama-index-readers-mondaydotcom/README.md b/llama-index-integrations/readers/llama-index-readers-mondaydotcom/README.md
index 47e4b14b946e79828b6da2db23ce6ae996dee472..fd9d1f696fa5d8de6daade1d6dee9c43ac1cc41a 100644
--- a/llama-index-integrations/readers/llama-index-readers-mondaydotcom/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-mondaydotcom/README.md
@@ -1,5 +1,9 @@
 # Monday Loader
 
+```bash
+pip install llama-index-readers-mondaydotcom
+```
+
 This loader loads data from monday.com. The user specifies an API token to initialize the MondayReader. They then specify a monday.com board id to load in the corresponding Document objects.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader loads data from monday.com. The user specifies an API token to initi
 Here's an example usage of the MondayReader.
 
 ```python
-from llama_index import download_loader
-
-MondayReader = download_loader("MondayReader")
+from llama_index.readers.mondaydotcom import MondayReader
 
 reader = MondayReader("<monday_api_token>")
 documents = reader.load_data("<board_id: int>")
diff --git a/llama-index-integrations/readers/llama-index-readers-nougat-ocr/README.md b/llama-index-integrations/readers/llama-index-readers-nougat-ocr/README.md
index d38c41f7a01ad50c86b97b4424de5cd70d4c2829..56b3285ae302d3be491162970a946e963de2d6b0 100644
--- a/llama-index-integrations/readers/llama-index-readers-nougat-ocr/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-nougat-ocr/README.md
@@ -1,5 +1,9 @@
 # Nougat OCR loader
 
+```bash
+pip install llama-index-readers-nougat-ocr
+```
+
 This loader reads the equations, symbols, and tables included in the PDF.
 
 Users can input the path of the academic PDF document `file` which they want to parse. This OCR understands LaTeX math and tables.
@@ -9,7 +13,7 @@ Users can input the path of the academic PDF document `file` which they want to
 Here's an example usage of the PDFNougatOCR.
 
 ```python
-from llama_hub.nougat_ocr import PDFNougatOCR
+from llama_index.readers.nougat_ocr import PDFNougatOCR
 
 reader = PDFNougatOCR()
 
diff --git a/llama-index-integrations/readers/llama-index-readers-openalex/README.md b/llama-index-integrations/readers/llama-index-readers-openalex/README.md
index a171d0697be41141c1eb3922fe77d0fdcb2ac12a..3ba2fb3b3bc1149b33521fdb763e6f05f4794c69 100644
--- a/llama-index-integrations/readers/llama-index-readers-openalex/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-openalex/README.md
@@ -1,11 +1,15 @@
 # OpenAlex Reader
 
+```bash
+pip install llama-index-readers-openalex
+```
+
 This loader will search for papers in OpenAlex and load them in llama-index. The main advantage of using OpenAlex is that you can search the full-text for Open Access papers as well.
 
 ## Usage
 
 ```python
-from llama_hub.openalex_loader import OpenAlexReader
+from llama_index.readers.openalex import OpenAlexReader
 
 openalex_reader = OpenAlexReader(email="shauryr@gmail.com")
 query = "biases in large language models"
diff --git a/llama-index-integrations/readers/llama-index-readers-opendal/README.md b/llama-index-integrations/readers/llama-index-readers-opendal/README.md
index 86cf7176efcb8936d416f0445a6c407a4ad6c253..8890a2cc0aa781cfc1f0f21376a77395d6fae925 100644
--- a/llama-index-integrations/readers/llama-index-readers-opendal/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-opendal/README.md
@@ -1,5 +1,9 @@
 # OpenDAL Loaders
 
+```bash
+pip install llama-index-readers-opendal
+```
+
 ## Base OpendalReader
 
 This loader parses any file via [Apache OpenDAL](https://github.com/apache/incubator-opendal).
@@ -11,9 +15,7 @@ All files are temporarily downloaded locally and subsequently parsed with `Simpl
 `OpendalReader` can read data from any supported storage services including `s3`, `azblob`, `gcs` and so on.
 
 ```python
-from llama_index import download_loader
-
-OpendalReader = download_loader("OpendalReader")
+from llama_index.readers.opendal import OpendalReader
 
 loader = OpendalReader(
     scheme="s3",
@@ -40,9 +42,7 @@ All files are temporarily downloaded locally and subsequently parsed with `Simpl
 ### Usage
 
 ```python
-from llama_index import download_loader
-
-OpendalAzblobReader = download_loader("OpendalAzblobReader")
+from llama_index.readers.opendal import OpendalAzblobReader
 
 loader = OpendalAzblobReader(
     container="container",
@@ -69,9 +69,7 @@ All files are temporarily downloaded locally and subsequently parsed with `Simpl
 ### Usage
 
 ```python
-from llama_index import download_loader
-
-OpendalGcsReader = download_loader("OpendalGcsReader")
+from llama_index.readers.opendal import OpendalGcsReader
 
 loader = OpendalGcsReader(
     bucket="bucket",
@@ -99,10 +97,6 @@ All files are temporarily downloaded locally and subsequently parsed with `Simpl
 ### Usage
 
 ```python
-from llama_index import download_loader
-
-OpendalS3Reader = download_loader("OpendalS3Reader")
-
 loader = OpendalS3Reader(
     bucket="bucket",
     path="path/to/data/",
diff --git a/llama-index-integrations/readers/llama-index-readers-opensearch/README.md b/llama-index-integrations/readers/llama-index-readers-opensearch/README.md
index 2af8cd63722326a85cb893d2b7d9cd8cfbf208f1..b9a172008b24f3b7b304b2ffda4b6b78776685b7 100644
--- a/llama-index-integrations/readers/llama-index-readers-opensearch/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-opensearch/README.md
@@ -1,5 +1,9 @@
 # Opensearch Loader
 
+```bash
+pip install llama-index-readers-opensearch
+```
+
 The Opensearch Loader returns a set of texts corresponding to documents retrieved from an Opensearch index.
 The user initializes the loader with an Opensearch index. They then pass in a field, and optionally a JSON query DSL object to fetch the fields they want.
 
@@ -8,9 +12,7 @@ The user initializes the loader with an Opensearch index. They then pass in a fi
 Here's an example usage of the OpensearchReader to load 100 documents.
 
 ```python
-from llama_index import download_loader
-
-OpensearchReader = download_loader("OpensearchReader")
+from llama_index.readers.opensearch import OpensearchReader
 
 reader = OpensearchReader(
     host="localhost",
diff --git a/llama-index-integrations/readers/llama-index-readers-pandas-ai/README.md b/llama-index-integrations/readers/llama-index-readers-pandas-ai/README.md
index 7c5dc5d16c2a21e0ae2406105e790e2d3164bd46..15124e6e2f9ec6fdcf8f2b98eb25b1c64b23195e 100644
--- a/llama-index-integrations/readers/llama-index-readers-pandas-ai/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-pandas-ai/README.md
@@ -1,5 +1,9 @@
 # Pandas AI Loader
 
+```bash
+pip install llama-index-readers-pandas-ai
+```
+
 This loader is a light wrapper around the `PandasAI` Python package.
 
 See here: https://github.com/gventuri/pandas-ai.
@@ -10,7 +14,6 @@ you can choose to load in `Document` objects via `load_data`.
 ## Usage
 
 ```python
-from llama_index import download_loader
 from pandasai.llm.openai import OpenAI
 import pandas as pd
 
@@ -47,7 +50,7 @@ df = pd.DataFrame(
 
 llm = OpenAI()
 
-PandasAIReader = download_loader("PandasAIReader")
+from llama_index.readers.pandas_ai import PandasAIReader
 
 # use run_pandas_ai directly
 # set is_conversational_answer=False to get parsed output
diff --git a/llama-index-integrations/readers/llama-index-readers-papers/README.md b/llama-index-integrations/readers/llama-index-readers-papers/README.md
index 54d66b6bc3da6bf7b11128884f234b9e24a92703..7dbebcfb7499b997c748db0d174968e4a5102825 100644
--- a/llama-index-integrations/readers/llama-index-readers-papers/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-papers/README.md
@@ -1,5 +1,9 @@
 # Papers Loaders
 
+```bash
+pip install llama-index-readers-papers
+```
+
 ## Arxiv Papers Loader
 
 This loader fetches the text from the most relevant scientific papers on Arxiv specified by a search query (e.g. "Artificial Intelligence"). For each paper, the abstract is extracted and put in a separate document. The search query may be any string, Arxiv paper id, or a general Arxiv query string (see the full list of capabilities [here](https://info.arxiv.org/help/api/user-manual.html#query_details)).
@@ -9,9 +13,7 @@ This loader fetches the text from the most relevant scientific papers on Arxiv s
 To use this loader, you need to pass in the search query. You may also optionally specify a local directory to temporarily store the paper PDFs (they are deleted automatically) and the maximum number of papers you want to parse for your search query (default is 10).
 
 ```python
-from llama_index import download_loader
-
-ArxivReader = download_loader("ArxivReader")
+from llama_index.readers.papers import ArxivReader
 
 loader = ArxivReader()
 documents = loader.load_data(search_query="au:Karpathy")
@@ -20,9 +22,7 @@ documents = loader.load_data(search_query="au:Karpathy")
 Alternatively, if you would like to load papers and abstracts separately:
 
 ```python
-from llama_index import download_loader
-
-ArxivReader = download_loader("ArxivReader")
+from llama_index.readers.papers import ArxivReader
 
 loader = ArxivReader()
 documents, abstracts = loader.load_papers_and_abstracts(
@@ -41,9 +41,7 @@ This loader fetches the text from the most relevant scientific papers on Pubmed
 To use this loader, you need to pass in the search query. You may also optionally specify the maximum number of papers you want to parse for your search query (default is 10).
 
 ```python
-from llama_index import download_loader
-
-PubmedReader = download_loader("PubmedReader")
+from llama_index.readers.papers import PubmedReader
 
 loader = PubmedReader()
 documents = loader.load_data(search_query="amyloidosis")
diff --git a/llama-index-integrations/readers/llama-index-readers-patentsview/README.md b/llama-index-integrations/readers/llama-index-readers-patentsview/README.md
index 127d653495ab3a5c1918202e4e453c0a987b0568..c283ffa887e4e2734be90000adb91f99cfb8ccb4 100644
--- a/llama-index-integrations/readers/llama-index-readers-patentsview/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-patentsview/README.md
@@ -1,5 +1,9 @@
 # Patentsview Loader
 
+```bash
+pip install llama-index-readers-patentsview
+```
+
 This loader loads patent abstracts from a list of patent numbers with the API provided by [Patentsview](https://patentsview.org/).
 
 ## Usage
@@ -7,9 +11,8 @@ This loader loads patent abstract from `a list of patent numbers` with API provi
 Here's an example usage of PatentsviewReader.
 
 ```python
-from llama_index import download_loader
+from llama_index.readers.patentsview import PatentsviewReader
 
-PatentsviewReader = download_loader("PatentsviewReader")
 loader = PatentsviewReader()
 patents = ["8848839", "10452978"]
 abstracts = loader.load_data(patents)
diff --git a/llama-index-integrations/readers/llama-index-readers-pdb/README.md b/llama-index-integrations/readers/llama-index-readers-pdb/README.md
index 9997c20ac48ecd25bec75c505f3f89ba718c1dd1..b82f352b14e2926ef84ab22e24110ba932a29c07 100644
--- a/llama-index-integrations/readers/llama-index-readers-pdb/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-pdb/README.md
@@ -1,5 +1,9 @@
 # Protein Data Bank (PDB) publication Loader
 
+```bash
+pip install llama-index-readers-pdb
+```
+
 This loader fetches the abstract of PDB entries using the RCSB (Research Collaboratory for Structural Bioinformatics) or EBI (European Bioinformatics Institute) REST api.
 
 ## Usage
@@ -7,7 +11,7 @@ This loader fetches the abstract of PDB entries using the RCSB (Research Collabo
 To use this loader, simply pass an array of PDB ids into `load_data`:
 
 ```python
-from llama_hub.pdb import PdbAbstractReader
+from llama_index.readers.pdb import PdbAbstractReader
 
 loader = PdbAbstractReader()
 documents = loader.load_data(pdb_id=["1cbs"])
diff --git a/llama-index-integrations/readers/llama-index-readers-pdf-table/README.md b/llama-index-integrations/readers/llama-index-readers-pdf-table/README.md
index bfe9e0271a33310223b2c1703c3cc2fe863f30de..5036097ec1e85450683f99fb539724a389c26084 100644
--- a/llama-index-integrations/readers/llama-index-readers-pdf-table/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-pdf-table/README.md
@@ -1,5 +1,9 @@
 # PDF Table Loader
 
+```bash
+pip install llama-index-readers-pdf-table
+```
+
 This loader reads the tables included in the PDF.
 
 Users can input the PDF `file` and the `pages` from which they want to extract tables, and they can read the tables included on those pages.
@@ -10,7 +14,7 @@ Here's an example usage of the PDFTableReader.
 `pages` parameter is the same as camelot's `pages`. Therefore, you can use patterns such as `all`, `1,2,3`, `10-20`, and so on.
 
 ```python
-from llama_hub.pdf_table import PDFTableReader
+from llama_index.readers.pdf_table import PDFTableReader
 from pathlib import Path
 
 reader = PDFTableReader()
diff --git a/llama-index-integrations/readers/llama-index-readers-preprocess/README.md b/llama-index-integrations/readers/llama-index-readers-preprocess/README.md
index ae514fbdf48937b271bc0edb8a85ea22bdca2fb6..ebd34f3fdea0492eb9fbadc800c8d99db19628ed 100644
--- a/llama-index-integrations/readers/llama-index-readers-preprocess/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-preprocess/README.md
@@ -1,5 +1,9 @@
 # Preprocess Loader
 
+```bash
+pip install llama-index-readers-preprocess
+```
+
 [Preprocess](https://preprocess.co) is an API service that splits any kind of document into optimal chunks of text for use in language model tasks.
 Given documents in input `Preprocess` splits them into chunks of text that respect the layout and semantics of the original document.
 We split the content by taking into account sections, paragraphs, lists, images, data tables, text tables, and slides, and following the content semantics for long texts.
@@ -26,10 +30,9 @@ To chunk a file pass a valid filepath and the reader will start converting and c
 If you want to handle the nodes directly:
 
 ```python
-from llama_index import VectorStoreIndex
-from llama_index import download_loader
+from llama_index.core import VectorStoreIndex
 
-PreprocessReader = download_loader("PreprocessReader")
+from llama_index.readers.preprocess import PreprocessReader
 
 # pass a filepath and get the chunks as nodes
 loader = PreprocessReader(
@@ -45,10 +48,9 @@ query_engine = index.as_query_engine()
 By default, load_data() returns a document for each chunk; remember not to apply any splitting to these documents
 
 ```python
-from llama_index import VectorStoreIndex
-from llama_index import download_loader
+from llama_index.core import VectorStoreIndex
 
-PreprocessReader = download_loader("PreprocessReader")
+from llama_index.readers.preprocess import PreprocessReader
 
 # pass a filepath and get the chunks as nodes
 loader = PreprocessReader(
diff --git a/llama-index-integrations/readers/llama-index-readers-rayyan/README.md b/llama-index-integrations/readers/llama-index-readers-rayyan/README.md
index d5dae921eac7c5b530709dc615e43c88a0b66f1c..05c589fa43c8e4aa79d66cf524e1e70bcd3b1fc7 100644
--- a/llama-index-integrations/readers/llama-index-readers-rayyan/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-rayyan/README.md
@@ -1,5 +1,9 @@
 # Rayyan Loader
 
+```bash
+pip install llama-index-readers-rayyan
+```
+
 This loader fetches review articles from [Rayyan](https://www.rayyan.ai/)
 using the [Rayyan SDK](https://github.com/rayyansys/rayyan-python-sdk). All articles
 for a given review are fetched by default unless a filter is specified.
@@ -11,9 +15,8 @@ and optionally the API server URL if different from the default. More details
 about these parameters can be found in the official Rayyan SDK repository.
 
 ```python
-from llama_index import download_loader
+from llama_index.readers.rayyan import RayyanReader
 
-RayyanReader = download_loader("RayyanReader")
 loader = RayyanReader(credentials_path="path/to/rayyan-creds.json")
 ```
 
diff --git a/llama-index-integrations/readers/llama-index-readers-readwise/README.md b/llama-index-integrations/readers/llama-index-readers-readwise/README.md
index 525cfe8b25917da859680f11d2f9269c6f8ffdcc..9aa461c5822f3606723d8683f27ed71f7240c07c 100644
--- a/llama-index-integrations/readers/llama-index-readers-readwise/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-readwise/README.md
@@ -1,5 +1,9 @@
 # Readwise Reader
 
+```bash
+pip install llama-index-readers-readwise
+```
+
 Use Readwise's export API to fetch your highlights from web articles, epubs, pdfs, Kindle, YouTube, and load the resulting text into LLMs.
 
 ## Setup
@@ -12,9 +16,10 @@ Here is an example usage of the Readwise Reader:
 
 ```python
 import os
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
+
+from llama_index.readers.readwise import ReadwiseReader
 
-ReadwiseReader = download_loader("ReadwiseReader")
 token = os.getenv("READWISE_API_KEY")
 loader = ReadwiseReader(api_key=token)
 documents = loader.load_data()
@@ -28,9 +33,10 @@ You can also query for highlights that have been created after a certain time:
 ```python
 import os
 import datetime
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
+
+from llama_index.readers.readwise import ReadwiseReader
 
-ReadwiseReader = download_loader("ReadwiseReader")
 token = os.getenv("READWISE_API_KEY")
 loader = ReadwiseReader(api_key=token)
 seven_days_ago = datetime.datetime.now() - datetime.timedelta(days=7)
diff --git a/llama-index-integrations/readers/llama-index-readers-reddit/README.md b/llama-index-integrations/readers/llama-index-readers-reddit/README.md
index 7153d344f6d81192eabf5c3b1de738e9b3316da4..ea964afc453eb9c1c6c00ba1b75afb9635d9edf0 100644
--- a/llama-index-integrations/readers/llama-index-readers-reddit/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-reddit/README.md
@@ -1,5 +1,9 @@
 # Reddit Reader
 
+```bash
+pip install llama-index-readers-reddit
+```
+
 For any subreddit(s) you're interested in, search for relevant posts using keyword(s) and load the resulting text in the post and top-level comments into LLMs/ LangChains.
 
 ## Get your Reddit credentials ready
@@ -15,9 +19,9 @@ For any subreddit(s) you're interested in, search for relevant posts using keywo
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
 
-RedditReader = download_loader("RedditReader")
+from llama_index.readers.reddit import RedditReader
 
 subreddits = ["MachineLearning"]
 search_keys = ["PyTorch", "deploy"]
@@ -35,13 +39,13 @@ index.query("What are the pain points of PyTorch users?")
 ### LangChain
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
 
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-RedditReader = download_loader("RedditReader")
+from llama_index.readers.reddit import RedditReader
 
 subreddits = ["MachineLearning"]
 search_keys = ["PyTorch", "deploy"]
diff --git a/llama-index-integrations/readers/llama-index-readers-remote-depth/README.md b/llama-index-integrations/readers/llama-index-readers-remote-depth/README.md
index 48ba8b0ab8159717a80807a17e9e3bc254650c22..886acec8ddeb0125f5cc391a2881716a38ee4cd4 100644
--- a/llama-index-integrations/readers/llama-index-readers-remote-depth/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-remote-depth/README.md
@@ -1,5 +1,9 @@
 # Remote Page/File Loader
 
+```bash
+pip install llama-index-readers-remote-depth
+```
+
 This loader makes it easy to extract the text from the links available in a webpage URL, and extract the links present in the page. It's based on `RemoteReader` (reading single page), that is based on `SimpleDirectoryReader` (parsing the document if file is a pdf, etc). It is an all-in-one tool for (almost) any group of urls.
 
 You can try with this MIT lecture link, it will be able to extract the syllabus, the PDFs, etc:
@@ -10,9 +14,7 @@ You can try with this MIT lecture link, it will be able to extract the syllabus,
 You need to specify the parameter `depth` to specify how many levels of links you want to extract. For example, if you want to extract the links in the page, and the links in the links in the page, you need to specify `depth=2`.
 
 ```python
-from llama_index import download_loader
-
-RemoteDepthReader = download_loader("RemoteDepthReader")
+from llama_index.readers.remote_depth import RemoteDepthReader
 
 loader = RemoteDepthReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-remote/README.md b/llama-index-integrations/readers/llama-index-readers-remote/README.md
index f9ff15c271201f06ea5bb3466f438a29227aae72..c4f8e64a1c8f08ef60880af70901bec9366a3662 100644
--- a/llama-index-integrations/readers/llama-index-readers-remote/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-remote/README.md
@@ -1,5 +1,9 @@
 # Remote Page/File Loader
 
+```bash
+pip install llama-index-readers-remote
+```
+
 This loader makes it easy to extract the text from any remote page or file using just its url. If there's a file at the url, this loader will download it temporarily and parse it using `SimpleDirectoryReader`. It is an all-in-one tool for (almost) any url.
 
 As a result, any page or type of file is supported. For instance, if a `.txt` url such as a [Project Gutenberg book](https://www.gutenberg.org/cache/epub/69994/pg69994.txt) is passed in, the text will be parsed as is. On the other hand, if a hosted .mp3 url is passed in, it will be downloaded and parsed using `AudioTranscriber`.
@@ -9,9 +13,7 @@ As a result, any page or type of file is supported. For instance, if a `.txt` ur
 To use this loader, you need to pass in a `Path` to a local file. Optionally, you may specify a `file_extractor` for the `SimpleDirectoryReader` to use, other than the default one.
 
 ```python
-from llama_index import download_loader
-
-RemoteReader = download_loader("RemoteReader")
+from llama_index.readers.remote import RemoteReader
 
 loader = RemoteReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-s3/README.md b/llama-index-integrations/readers/llama-index-readers-s3/README.md
index f4413b0f5a2aed9204e18e8ad41daf5b08542b2d..96c3ed4d95f2703f25c27966d01eb4d85c6fb79f 100644
--- a/llama-index-integrations/readers/llama-index-readers-s3/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-s3/README.md
@@ -11,10 +11,6 @@ To use this loader, you need to pass in the name of your S3 Bucket. After that,
 Otherwise, you may specify a prefix if you only want to parse certain files in the Bucket, or a subdirectory. AWS Access Key credentials may either be passed in during initialization or stored locally (see above).
 
 ```python
-from llama_index import download_loader
-
-S3Reader = download_loader("S3Reader")
-
 loader = S3Reader(
     bucket="scrabble-dictionary",
     key="dictionary.txt",
diff --git a/llama-index-integrations/readers/llama-index-readers-sec-filings/README.md b/llama-index-integrations/readers/llama-index-readers-sec-filings/README.md
index 15d0155f31f0442a04a8e46ccdb70bf5a0462895..4a2e0a33abd0202878eb619153dd849634d3d255 100644
--- a/llama-index-integrations/readers/llama-index-readers-sec-filings/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-sec-filings/README.md
@@ -1,5 +1,9 @@
 # SEC DATA DOWNLOADER
 
+```bash
+pip install llama-index-readers-sec-filings
+```
+
 Please checkout this repo that I am building on SEC Question Answering Agent [SEC-QA](https://github.com/Athe-kunal/SEC-QA-Agent)
 
 This repository downloads all the texts from SEC documents (10-K and 10-Q). Currently, it does not support documents that are amended, but that will be added in the near future.
@@ -21,9 +25,7 @@ The SEC Downloader expects 5 attributes
 ## Usage
 
 ```python
-from llama_index import download_loader
-
-SECFilingsLoader = download_loader("SECFilingsLoader")
+from llama_index.readers.sec_filings import SECFilingsLoader
 
 loader = SECFilingsLoader(tickers=["TSLA"], amount=3, filing_type="10-K")
 loader.load_data()
 This loader can be used with both Langchain and LlamaIndex.
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
-from llama_index import SimpleDirectoryReader
+from llama_index.core import VectorStoreIndex, download_loader
+from llama_index.core import SimpleDirectoryReader
 
-SECFilingsLoader = download_loader("SECFilingsLoader")
+from llama_index.readers.sec_filings import SECFilingsLoader
 
 loader = SECFilingsLoader(tickers=["TSLA"], amount=3, filing_type="10-K")
 loader.load_data()
@@ -111,13 +113,12 @@ index.query("What are the risk factors of Tesla for the year 2022?")
 ### Langchain
 
 ```python
-from llama_index import download_loader
 from langchain.llms import OpenAI
 from langchain.chains import RetrievalQA
 from langchain.document_loaders import DirectoryLoader
 from langchain.indexes import VectorstoreIndexCreator
 
-SECFilingsLoader = download_loader("SECFilingsLoader")
+from llama_index.readers.sec_filings import SECFilingsLoader
 
 loader = SECFilingsLoader(tickers=["TSLA"], amount=3, filing_type="10-K")
 loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-semanticscholar/README.md b/llama-index-integrations/readers/llama-index-readers-semanticscholar/README.md
index 0242f63cd6e20222711b3f787e35560dd97a9318..08b6a486f6053d52263d466b478f7ddb885d40e9 100644
--- a/llama-index-integrations/readers/llama-index-readers-semanticscholar/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-semanticscholar/README.md
@@ -1,5 +1,11 @@
 # Semantic Scholar Loader
 
+```bash
+pip install llama-index-readers-semanticscholar
+
+pip install llama-index-llms-openai
+```
+
 Welcome to Semantic Scholar Loader. This module serves as a crucial utility for researchers and professionals looking to get scholarly articles and publications from the Semantic Scholar database.
 
 For any research topic you are interested in, this loader reads relevant papers from a search result in Semantic Scholar into `Documents`.
@@ -27,13 +33,10 @@ Here is an example of how to use this loader in `llama_index` and get citations
 ### LlamaIndex
 
 ```python
-from llama_index.llms import OpenAI
-from llama_index.query_engine import CitationQueryEngine
-from llama_index import (
-    VectorStoreIndex,
-    ServiceContext,
-)
-from llama_hub.semanticscholar import SemanticScholarReader
+from llama_index.llms.openai import OpenAI
+from llama_index.core.query_engine import CitationQueryEngine
+from llama_index.core import VectorStoreIndex, ServiceContext
+from llama_index.readers.semanticscholar import SemanticScholarReader
 
 s2reader = SemanticScholarReader()
 
diff --git a/llama-index-integrations/readers/llama-index-readers-singlestore/README.md b/llama-index-integrations/readers/llama-index-readers-singlestore/README.md
index 5ab20d4d6c46f27313639c0e387e9a2bdfa9b779..1b98a72969891b6358a1c18973dc472786bb0ece 100644
--- a/llama-index-integrations/readers/llama-index-readers-singlestore/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-singlestore/README.md
@@ -1,5 +1,9 @@
 # SingleStore Loader
 
+```bash
+pip install llama-index-readers-singlestore
+```
+
 The SingleStore Loader retrieves a set of documents from a specified table in a SingleStore database. The user initializes the loader with database information and then provides a search embedding for retrieving similar documents.
 
 ## Usage
@@ -7,7 +11,7 @@ The SingleStore Loader retrieves a set of documents from a specified table in a
 Here's an example usage of the SingleStoreReader:
 
 ```python
-from llama_hub.singlestore import SingleStoreReader
+from llama_index.readers.singlestore import SingleStoreReader
 
 # Initialize the reader with your SingleStore database credentials and other relevant details
 reader = SingleStoreReader(
diff --git a/llama-index-integrations/readers/llama-index-readers-smart-pdf-loader/README.md b/llama-index-integrations/readers/llama-index-readers-smart-pdf-loader/README.md
index ceccf5377e664e73d8e3265779a01e38b3804251..1184ce086a6a6306609c36bd5b17fd0a0ebfbd68 100644
--- a/llama-index-integrations/readers/llama-index-readers-smart-pdf-loader/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-smart-pdf-loader/README.md
@@ -1,5 +1,9 @@
 # Smart PDF Loader
 
+```bash
+pip install llama-index-readers-smart-pdf-loader
+```
+
 SmartPDFLoader is a super fast PDF reader that understands the layout structure of PDFs such as nested sections, nested lists, paragraphs and tables.
 It uses layout information to smartly chunk PDFs into optimal short contexts for LLMs.
 
@@ -16,7 +20,7 @@ pip install llmsherpa
 Here's an example usage of the SmartPDFLoader:
 
 ```python
-from llama_hub.smart_pdf_loader import SmartPDFLoader
+from llama_index.readers.smart_pdf_loader import SmartPDFLoader
 
 llmsherpa_api_url = "https://readers.llmsherpa.com/api/document/developer/parseDocument?renderFormat=all"
 pdf_url = "https://arxiv.org/pdf/1910.13461.pdf"  # also allowed is a file path e.g. /home/downloads/xyz.pdf
@@ -27,7 +31,7 @@ documents = pdf_loader.load_data(pdf_url)
 Now you can use the documents with other LlamaIndex components. For example, for retrieval augmented generation, try this:
 
 ```python
-from llama_index import VectorStoreIndex
+from llama_index.core import VectorStoreIndex
 
 index = VectorStoreIndex.from_documents(documents)
 query_engine = index.as_query_engine()
diff --git a/llama-index-integrations/readers/llama-index-readers-snowflake/README.md b/llama-index-integrations/readers/llama-index-readers-snowflake/README.md
index d0302f8343088be67ea139e21ebcc8c8c5ac13b7..c6c0daec2cf5b060843e97e18e1bcfa8c5f2a914 100644
--- a/llama-index-integrations/readers/llama-index-readers-snowflake/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-snowflake/README.md
@@ -1,5 +1,9 @@
 # Snowflake Loader
 
+```bash
+pip install llama-index-readers-snowflake
+```
+
 This loader connects to Snowflake (using SQLAlchemy under the hood). The user specifies a query and extracts Document objects corresponding to the results. You can use this loader to easily connect to a database on Snowflake and pass the documents into a `GPTSQLStructStoreIndex` from LlamaIndex.
 
 ## Usage
@@ -9,9 +13,7 @@ This loader connects to Snowflake (using SQLAlchemy under the hood). The user sp
 Here's an example usage of the SnowflakeReader.
 
 ```python
-from llama_index import download_loader
-
-SnowflakeReader = download_loader("SnowflakeReader")
+from llama_index.readers.snowflake import SnowflakeReader
 
 reader = SnowflakeReader(
     engine=your_sqlalchemy_engine,
@@ -27,9 +29,7 @@ documents = reader.load_data(query=query)
 Here's an example usage of the SnowflakeReader.
 
 ```python
-from llama_index import download_loader
-
-SnowflakeReader = download_loader("SnowflakeReader")
+from llama_index.readers.snowflake import SnowflakeReader
 
 reader = SnowflakeReader(
     account="your_account",
diff --git a/llama-index-integrations/readers/llama-index-readers-snscrape-twitter/README.md b/llama-index-integrations/readers/llama-index-readers-snscrape-twitter/README.md
index 681155514845ad456b728486e80fe346c5b4f224..e6d7b0f9aee60bf6d7ac58432ee0fc324fe31028 100644
--- a/llama-index-integrations/readers/llama-index-readers-snscrape-twitter/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-snscrape-twitter/README.md
@@ -1,5 +1,9 @@
 # Snscrape twitter Loader
 
+```bash
+pip install llama-index-readers-snscrape-twitter
+```
+
 This loader loads documents from Twitter using the Snscrape Python package.
 
 ## Usage
@@ -7,10 +11,9 @@ This loader loads documents from Twitter using the Snscrape Python package.
 Here's an example usage of the SnscrapeReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-SnscrapeReader = download_loader("SnscrapeTwitterReader")
+from llama_index.readers.snscrape_twitter import SnscrapeTwitterReader
 
 loader = SnscrapeReader()
 documents = loader.load_data(username="elonmusk", num_tweets=10)
diff --git a/llama-index-integrations/readers/llama-index-readers-spotify/README.md b/llama-index-integrations/readers/llama-index-readers-spotify/README.md
index 502d91d282b8ab550739e3d3bb2228730bcf65f6..59cc209d622e374169689ca8b69dab7e6c995236 100644
--- a/llama-index-integrations/readers/llama-index-readers-spotify/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-spotify/README.md
@@ -1,5 +1,9 @@
 # Spotify Loader
 
+```bash
+pip install llama-index-readers-spotify
+```
+
 This loader reads your Spotify account and loads saved albums, tracks, or playlists into `Documents`.
 
 As a prerequisite, you will need to register with [Spotify for Developers](https://developer.spotify.com) and create an app in order to get a `client_id` and a `client_secret`. You should then set a `redirect_uri` for the app (in the web dashboard under app settings). The `redirect_uri` does not need to be functional. You should then set the `client_id`, `client_secret`, and `redirect_uri` as environmental variables.
@@ -13,9 +17,7 @@ As a prerequisite, you will need to register with [Spotify for Developers](https
 Here's an example usage of the SpotifyReader. It will retrieve your saved albums, unless an optional `collection` argument is passed. Acceptable arguments are "albums", "tracks", and "playlists".
 
 ```python
-from llama_index import download_loader
-
-SpotifyReader = download_loader("SpotifyReader")
+from llama_index.readers.spotify import SpotifyReader
 
 loader = SpotifyReader()
 documents = loader.load_data()
@@ -28,9 +30,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
 
-SpotifyReader = download_loader("SpotifyReader")
+from llama_index.readers.spotify import SpotifyReader
 
 loader = SpotifyReader()
 documents = loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-stripe-docs/README.md b/llama-index-integrations/readers/llama-index-readers-stripe-docs/README.md
index 33d669b7aeb49ca5edc6aefa9f7cd78799702e9a..15dddba1fab8efd6748d183664de7babb3265484 100644
--- a/llama-index-integrations/readers/llama-index-readers-stripe-docs/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-stripe-docs/README.md
@@ -1,5 +1,9 @@
 # StripeDocs Loader
 
+```bash
+pip install llama-index-readers-stripe-docs
+```
+
 This loader asynchronously loads data from the [Stripe documentation](https://stripe.com/docs). It iterates through the Stripe sitemap to get all `/docs` references.
 
 It is based on the [Async Website Loader](https://llamahub.ai/l/web-async_web).
@@ -7,8 +11,8 @@ It is based on the [Async Website Loader](https://llamahub.ai/l/web-async_web).
 ## Usage
 
 ```python
-from llama_index import VectorStoreIndex
-from llama_hub.stripe_docs import StripeDocsReader
+from llama_index.core import VectorStoreIndex
+from llama_index.readers.stripe_docs import StripeDocsReader
 
 loader = StripeDocsReader()
 documents = loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-telegram/README.md b/llama-index-integrations/readers/llama-index-readers-telegram/README.md
index 0ee82e3344510d384e289fe3ef55310c9aa5e6e7..6b05fdc06422368fac24161ca5ee13bda07dd2a4 100644
--- a/llama-index-integrations/readers/llama-index-readers-telegram/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-telegram/README.md
@@ -1,5 +1,9 @@
 # Telegram Loader
 
+```bash
+pip install llama-index-readers-telegram
+```
+
 This loader fetches posts/chat messages/comments from Telegram channels or chats into `Document`s.
 
 Before working with Telegram’s API, you need to get your own API ID and hash:
@@ -31,9 +35,8 @@ If the `.session` file already existed, it will not login again, so be aware of
 To use this loader, you simply need to pass in a entity name.
 
 ```python
-from llama_index.core import download_loader
+from llama_index.readers.telegram import TelegramReader
 
-TelegramReader = download_loader("TelegramReader")
 loader = TelegramReader(
     session_name="[YOUR_SESSION_NAME]",
     api_id="[YOUR_API_ID]",
diff --git a/llama-index-integrations/readers/llama-index-readers-trello/README.md b/llama-index-integrations/readers/llama-index-readers-trello/README.md
index 787e53605bb802e76923efbffb99270de325d702..aaf2fe018b78a971f9616ea4c991f4c102556594 100644
--- a/llama-index-integrations/readers/llama-index-readers-trello/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-trello/README.md
@@ -1,5 +1,9 @@
 # Trello Loader
 
+```bash
+pip install llama-index-readers-trello
+```
+
 This loader loads documents from Trello. The user specifies an API key and API token to initialize the TrelloReader. They then specify a board_id to
 load in the corresponding Document objects representing Trello cards.
 
@@ -8,10 +12,9 @@ load in the corresponding Document objects representing Trello cards.
 Here's an example usage of the TrelloReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-TrelloReader = download_loader("TrelloReader")
+from llama_index.readers.trello import TrelloReader
 
 reader = TrelloReader("<Trello_API_KEY>", "<Trello_API_TOKEN>")
 documents = reader.load_data(board_id="<BOARD_ID>")
diff --git a/llama-index-integrations/readers/llama-index-readers-weather/README.md b/llama-index-integrations/readers/llama-index-readers-weather/README.md
index f20eb54baa6cf33c1ef07d4bb963029baa6d2f36..93de04fc5ceefc9b77d8599ba3442213b1c10feb 100644
--- a/llama-index-integrations/readers/llama-index-readers-weather/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-weather/README.md
@@ -1,5 +1,9 @@
 # Weather Loader
 
+```bash
+pip install llama-index-readers-weather
+```
+
 This loader fetches the weather data from the [OpenWeatherMap](https://openweathermap.org/api)'s OneCall API, using the `pyowm` Python package. You must initialize the loader with your OpenWeatherMap API token, and then pass in the names of the cities you want the weather data for.
 
 OWM's One Call API provides the following weather data for any geographical coordinate: - Current weather - Hourly forecast for 48 hours - Daily forecast for 7 days
@@ -9,9 +13,7 @@ OWM's One Call API provides the following weather data for any geographical coor
 To use this loader, you need to pass in an array of city names (eg. [chennai, chicago]). Pass in the country codes as well for better accuracy.
 
 ```python
-from llama_index import download_loader
-
-WeatherReader = download_loader("WeatherReader")
+from llama_index.readers.weather import WeatherReader
 
 loader = WeatherReader(token="[YOUR_TOKEN]")
 documents = loader.load_data(places=["Chennai, IN", "Dublin, IE"])
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/async_web/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/async_web/README.md
index 44c8985196a367e4e394a91f231099b50ee33391..0706ae999e574435a1c4d4c3a5bd029da9a21bcf 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/async_web/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/async_web/README.md
@@ -1,5 +1,9 @@
 # Async Website Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is an asynchronous web scraper that fetches the text from static websites by converting the HTML to text.
 
 ## Usage
@@ -7,7 +11,7 @@ This loader is an asynchronous web scraper that fetches the text from static web
 To use this loader, you need to pass in an array of URLs.
 
 ```python
-from llama_index.readers.web.async_web.base import AsyncWebPageReader
+from llama_index.readers.web import AsyncWebPageReader
 
 # for jupyter notebooks uncomment the following two lines of code:
 # import nest_asyncio
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/beautiful_soup_web/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/beautiful_soup_web/README.md
index 331cd5fce7661492bc15d455ca66d91a0e7a5441..f3506e6818ad621e81bc5ab6b3151e76b628b5d8 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/beautiful_soup_web/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/beautiful_soup_web/README.md
@@ -1,5 +1,9 @@
 # Beautiful Soup Website Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is a web scraper that fetches the text from websites using the `Beautiful Soup` (aka `bs4`) Python package. Furthermore, the flexibility of Beautiful Soup allows for custom templates that enable the loader to extract the desired text from specific website designs, such as Substack. Check out the code to see how to add your own.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader is a web scraper that fetches the text from websites using the `Beau
 To use this loader, you need to pass in an array of URLs.
 
 ```python
-from llama_index import download_loader
-
-BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
+from llama_index.readers.web import BeautifulSoupWebReader
 
 loader = BeautifulSoupWebReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -38,9 +40,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
+from llama_index.readers.web import BeautifulSoupWebReader
 
 loader = BeautifulSoupWebReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -53,12 +55,12 @@ index.query("What language is on this website?")
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
+from llama_index.readers.web import BeautifulSoupWebReader
 
 loader = BeautifulSoupWebReader()
 documents = loader.load_data(urls=["https://google.com"])
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/knowledge_base/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/knowledge_base/README.md
index 816e70f3e7a614573cf6a715a2148c42f614a727..397380d64829d30decac131dee9164d6c5856404 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/knowledge_base/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/knowledge_base/README.md
@@ -1,5 +1,9 @@
 # Knowledge Base Website Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is a web crawler and scraper that fetches text content from websites hosting public knowledge bases. Examples are the [Intercom help center](https://www.intercom.com/help/en/) or the [Robinhood help center](https://robinhood.com/us/en/support/). Typically these sites have a directory structure with several sections and many articles in each section. This loader crawls and finds all links that match the article path provided, and scrapes the content of each article. This can be used to create bots that answer customer questions based on public documentation.
 
 It uses [Playwright](https://playwright.dev/python/) to drive a browser. This reduces the chance of getting blocked by Cloudflare or other CDNs, but makes it a bit more challenging to run on cloud services.
@@ -17,9 +21,7 @@ This installs the browsers that Playwright requires.
 To use this loader, you need to pass in the root URL and the string to search for in the URL to tell if the crawler has reached an article. You also need to pass in several CSS selectors so the cralwer knows which links to follow and which elements to extract content from. use
 
 ```python
-from llama_index import download_loader
-
-KnowledgeBaseWebReader = download_loader("KnowledgeBaseWebReader")
+from llama_index.readers.web import KnowledgeBaseWebReader
 
 loader = KnowledgeBaseWebReader()
 documents = loader.load_data(
@@ -39,9 +41,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-KnowledgeBaseWebReader = download_loader("KnowledgeBaseWebReader")
+from llama_index.readers.web import KnowledgeBaseWebReader
 
 loader = KnowledgeBaseWebReader()
 documents = loader.load_data(
@@ -61,12 +63,12 @@ index.query("What languages does Intercom support?")
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-KnowledgeBaseWebReader = download_loader("KnowledgeBaseWebReader")
+from llama_index.readers.web import KnowledgeBaseWebReader
 
 loader = KnowledgeBaseWebReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/main_content_extractor/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/main_content_extractor/README.md
index 6fb33b7b5e7b547549e78a515a04b6afdd0fbcd3..1dea93a5890da20f2dffae1c7f013599d556ee3b 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/main_content_extractor/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/main_content_extractor/README.md
@@ -1,5 +1,9 @@
 # MainContentExtractor Website Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is a web scraper that fetches the text from static websites using the `MainContentExtractor` Python package.
 
 For information on how to extract main content, README in the following github repository
@@ -11,9 +15,7 @@ For information on how to extract main content, README in the following github r
 To use this loader, you need to pass in an array of URLs.
 
 ```python
-from llama_index import download_loader
-
-MainContentExtractorReader = download_loader("MainContentExtractorReader")
+from llama_index.readers.web import MainContentExtractorReader
 
 loader = MainContentExtractorReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -24,9 +26,9 @@ documents = loader.load_data(urls=["https://google.com"])
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-MainContentExtractorReader = download_loader("MainContentExtractorReader")
+from llama_index.readers.web import MainContentExtractorReader
 
 loader = MainContentExtractorReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -39,12 +41,12 @@ index.query("What language is on this website?")
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-MainContentExtractorReader = download_loader("MainContentExtractorReader")
+from llama_index.readers.web import MainContentExtractorReader
 
 loader = MainContentExtractorReader()
 documents = loader.load_data(urls=["https://google.com"])
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/news/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/news/README.md
index a20e912379d8073b045b480e7860ebe6670f2576..b56b7b83265b4ea0807b03d8b4586ba199e96cdd 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/news/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/news/README.md
@@ -1,5 +1,9 @@
 # News Article Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader makes use of the `newspaper3k` library to parse web page urls which have news
 articles in them.
 
@@ -12,7 +16,7 @@ pip install newspaper3k
 Pass in an array of individual page URLs:
 
 ```python
-from llama_index.readers.web.news import NewsArticleReader
+from llama_index.readers.web import NewsArticleReader
 
 reader = NewsArticleReader(use_nlp=False)
 documents = reader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/readability_web/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/readability_web/README.md
index 75da465912bc2736b2424989baa23dbcfaeb23d5..d41b4fe2d97afaafebb63aca92114c06822eabd2 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/readability_web/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/readability_web/README.md
@@ -1,5 +1,9 @@
 # Readability Webpage Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 Extracting relevant information from a fully rendered web page.
 During the processing, it is always assumed that web pages used as data sources contain textual content.
 
@@ -13,9 +17,7 @@ It is particularly effective for websites that use client-side rendering.
 To use this loader, you need to pass in a single of URL.
 
 ```python
-from llama_index import download_loader
-
-ReadabilityWebPageReader = download_loader("ReadabilityWebPageReader")
+from llama_index.readers.web import ReadabilityWebPageReader
 
 # or set proxy server for playwright: loader = ReadabilityWebPageReader(proxy="http://your-proxy-server:port")
 # For some specific web pages, you may need to set "wait_until" to "networkidle". loader = ReadabilityWebPageReader(wait_until="networkidle")
@@ -33,9 +35,7 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import download_loader
-
-ReadabilityWebPageReader = download_loader("ReadabilityWebPageReader")
+from llama_index.readers.web import ReadabilityWebPageReader
 
 loader = ReadabilityWebPageReader()
 documents = loader.load_data(
@@ -51,12 +51,12 @@ print(index.query("What is pages?"))
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-ReadabilityWebPageReader = download_loader("ReadabilityWebPageReader")
+from llama_index.readers.web import ReadabilityWebPageReader
 
 loader = ReadabilityWebPageReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss/README.md
index 4431bbb140f2864dc8bc0c57dd52d38fb93a32af..5e4e4e5d5440bfae335da8d403cca6baa2b8da68 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss/README.md
@@ -1,5 +1,9 @@
 # RSS Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader allows fetching text from an RSS feed. It uses the `feedparser` module
 to fetch the feed and optionally the `html2text` module to sanitize it.
 
@@ -8,9 +12,7 @@ to fetch the feed and optionally the `html2text` module to sanitize it.
 To use this loader, pass in an array of URL's.
 
 ```python
-from llama_index import download_loader
-
-RssReader = download_loader("RssReader")
+from llama_index.readers.web import RssReader
 
 reader = RssReader()
 documents = reader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss_news/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss_news/README.md
index 7b6965399f4e6d365d2fef2a57480098d4d8a830..fb345385a3be8a6cc94a8879a7bdaa88c1834821 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss_news/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss_news/README.md
@@ -9,7 +9,7 @@ To use this loader, pass in an array of URLs of RSS feeds. It will download the
 combine them:
 
 ```python
-from llama_index.readers.web.rss_news import RSSNewsReader
+from llama_index.readers.web import RSSNewsReader
 
 urls = [
     "https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml",
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/simple_web/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/simple_web/README.md
index f14354eb2016a18a0384e79a6d69e9daa013b9b9..b6f9d0ffa433a3f506ca85863c78ebb99ac99930 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/simple_web/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/simple_web/README.md
@@ -1,5 +1,9 @@
 # Simple Website Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is a simple web scraper that fetches the text from static websites by converting the HTML to text.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader is a simple web scraper that fetches the text from static websites b
 To use this loader, you need to pass in an array of URLs.
 
 ```python
-from llama_index import download_loader
-
-SimpleWebPageReader = download_loader("SimpleWebPageReader")
+from llama_index.readers.web import SimpleWebPageReader
 
 loader = SimpleWebPageReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -22,9 +24,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-SimpleWebPageReader = download_loader("SimpleWebPageReader")
+from llama_index.readers.web import SimpleWebPageReader
 
 loader = SimpleWebPageReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -37,12 +39,12 @@ index.query("What language is on this website?")
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-SimpleWebPageReader = download_loader("SimpleWebPageReader")
+from llama_index.readers.web import SimpleWebPageReader
 
 loader = SimpleWebPageReader()
 documents = loader.load_data(urls=["https://google.com"])
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/sitemap/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/sitemap/README.md
index 67066ecceb2e8d8a9c782bdb71bbc348c83de35f..b7b5f557fe368b5bb5986cacba338389201d926b 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/sitemap/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/sitemap/README.md
@@ -1,5 +1,9 @@
 # Sitemap Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is an asynchronous web scraper that fetches the text from static websites by using its sitemap and optionally converting the HTML to text.
 
 It is based on the [Async Website Loader](https://llama-hub-ui.vercel.app/l/web-async_web)
@@ -9,7 +13,7 @@ It is based on the [Async Website Loader](https://llama-hub-ui.vercel.app/l/web-
 To use this loader, you just declare the sitemap.xml url like this:
 
 ```python
-from llama_index.readers.web.sitemap import SitemapReader
+from llama_index.readers.web import SitemapReader
 
 # for jupyter notebooks uncomment the following two lines of code:
 # import nest_asyncio
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/trafilatura_web/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/trafilatura_web/README.md
index 2dc5b29dadee6d435523f6f7a7cf7efeacf91810..a75908bb1d6c93a7c62432964edf98ec9a25a5ec 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/trafilatura_web/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/trafilatura_web/README.md
@@ -1,5 +1,9 @@
 # Trafilatura Website Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is a web scraper that fetches the text from static websites using the `trafilatura` Python package.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader is a web scraper that fetches the text from static websites using th
 To use this loader, you need to pass in an array of URLs.
 
 ```python
-from llama_index import download_loader
-
-TrafilaturaWebReader = download_loader("TrafilaturaWebReader")
+from llama_index.readers.web import TrafilaturaWebReader
 
 loader = TrafilaturaWebReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -22,9 +24,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-TrafilaturaWebReader = download_loader("TrafilaturaWebReader")
+from llama_index.readers.web import TrafilaturaWebReader
 
 loader = TrafilaturaWebReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -37,12 +39,12 @@ index.query("What language is on this website?")
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-TrafilaturaWebReader = download_loader("TrafilaturaWebReader")
+from llama_index.readers.web import TrafilaturaWebReader
 
 loader = TrafilaturaWebReader()
 documents = loader.load_data(urls=["https://google.com"])
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/unstructured_web/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/unstructured_web/README.md
index de555a882e7b0aa0df88965a61112cf56a55021e..671e1d915bd3b7d27a6f66aba8ec73f7271e4673 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/unstructured_web/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/unstructured_web/README.md
@@ -1,14 +1,16 @@
 # Unstructured.io URL Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader extracts the text from URLs using [Unstructured.io](https://github.com/Unstructured-IO/unstructured). The partition_html function partitions an HTML document and returns a list
 of document Element objects.
 
 ## Usage
 
 ```python
-from llama_index import download_loader
-
-UnstructuredURLLoader = download_loader("UnstructuredURLLoader")
+from llama_index.readers.web import UnstructuredURLLoader
 
 urls = [
     "https://www.understandingwar.org/backgrounder/russian-offensive-campaign-assessment-february-8-2023",
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/whole_site/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/whole_site/README.md
index 4a7f9268f950f53110bbbc145178987e6b33d05d..7a758d467393ce39ed2ddc40abef0cf1cc66170f 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/whole_site/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/whole_site/README.md
@@ -1,5 +1,9 @@
 # WholeSiteReader
 
+```bash
+pip install llama-index-readers-web
+```
+
 The WholeSiteReader is a sophisticated web scraping tool that employs a breadth-first search (BFS) algorithm. It's designed to methodically traverse and extract content from entire websites, focusing specifically on predefined URL paths.
 
 ## Features
@@ -10,9 +14,8 @@ The WholeSiteReader is a sophisticated web scraping tool that employs a breadth-
 - **Selenium-Based:** Leverages Selenium for dynamic interaction with web pages, supporting JavaScript-rendered content.
 
 ```python
-from llama_index import download_loader
+from llama_index.readers.web import WholeSiteReader
 
-WholeSiteReader = download_loader("WholeSiteReader")
 # Initialize the scraper with a prefix URL and maximum depth
 scraper = WholeSiteReader(
     prefix="https://www.paulgraham.com/", max_depth=10  # Example prefix
@@ -31,9 +34,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-WholeSiteReader = download_loader("WholeSiteReader")
+from llama_index.readers.web import WholeSiteReader
 
 # Initialize the scraper with a prefix URL and maximum depth
 scraper = WholeSiteReader(
@@ -54,12 +57,12 @@ index.query("What language is on this website?")
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-WholeSiteReader = download_loader("WholeSiteReader")
+from llama_index.readers.web import WholeSiteReader
 
 # Initialize the scraper with a prefix URL and maximum depth
 scraper = WholeSiteReader(
diff --git a/llama-index-integrations/readers/llama-index-readers-whatsapp/README.md b/llama-index-integrations/readers/llama-index-readers-whatsapp/README.md
index 062ddad7e41c3b50e6b3f88fa8551cbcf4d1e161..7e02868f20b02cf5deb50fc8dcc3618fb22b1854 100644
--- a/llama-index-integrations/readers/llama-index-readers-whatsapp/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-whatsapp/README.md
@@ -1,5 +1,9 @@
 # Whatsapp chat loader
 
+```bash
+pip install llama-index-readers-whatsapp
+```
+
 ## Export a Whatsapp chat
 
 1. Open a chat
@@ -16,9 +20,8 @@ For more info see [Whatsapp's Help Center](https://faq.whatsapp.com/118041407917
 
 ```python
 from pathlib import Path
-from llama_index import download_loader
 
-WhatsappChatLoader = download_loader("WhatsappChatLoader")
+from llama_index.readers.whatsapp import WhatsappChatLoader
 
 path = "whatsapp.txt"
 loader = WhatsappChatLoader(path=path)
diff --git a/llama-index-integrations/readers/llama-index-readers-wordlift/README.md b/llama-index-integrations/readers/llama-index-readers-wordlift/README.md
index 38a2b9c3c7948ad62a822a20da07a1ced91db9dd..ef3f79e7acc9378adbe194a2bf92778d83e9429c 100644
--- a/llama-index-integrations/readers/llama-index-readers-wordlift/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-wordlift/README.md
@@ -1,5 +1,9 @@
 # WordLift Reader
 
+```bash
+pip install llama-index-readers-wordlift
+```
+
 The WordLift GraphQL Reader is a connector to fetch and transform data from a WordLift Knowledge Graph using your the WordLift Key. The connector provides a convenient way to load data from WordLift using a GraphQL query and transform it into a list of documents for further processing.
 
 ## Usage
@@ -15,10 +19,10 @@ Here's an example of how to use the WordLift GraphQL Reader:
 
 ```python
 import json
-from llama_index import VectorStoreIndex
-from llama_index.readers.schema import Document
+from llama_index.core import VectorStoreIndex
+from llama_index.core import Document
 from langchain.llms import OpenAI
-from llama_hub.wordlift import WordLiftLoader
+from llama_index.readers.wordlift import WordLiftLoader
 
 # Set up the necessary configuration options
 endpoint = "https://api.wordlift.io/graphql"
diff --git a/llama-index-integrations/readers/llama-index-readers-wordpress/README.md b/llama-index-integrations/readers/llama-index-readers-wordpress/README.md
index e46aadebd707d854cc80d7b06eead775b860bdf7..7c57428410868c7f6b77cf015ddb887001b0f8ff 100644
--- a/llama-index-integrations/readers/llama-index-readers-wordpress/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-wordpress/README.md
@@ -1,5 +1,9 @@
 # Wordpress Loader
 
+```bash
+pip install llama-index-readers-wordpress
+```
+
 This loader fetches the text from Wordpress blog posts using the Wordpress API. It also uses the BeautifulSoup library to parse the HTML and extract the text from the articles.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader fetches the text from Wordpress blog posts using the Wordpress API.
 To use this loader, you need to pass base url of the Wordpress installation (e.g. `https://www.mysite.com`), a username, and an application password for the user (more about application passwords [here](https://www.paidmembershipspro.com/create-application-password-wordpress/))
 
 ```python
-from llama_index import download_loader
-
-WordpressReader = download_loader("WordpressReader")
+from llama_index.readers.wordpress import WordpressReader
 
 loader = WordpressReader(
     url="https://www.mysite.com",
diff --git a/llama-index-integrations/readers/llama-index-readers-youtube-transcript/README.md b/llama-index-integrations/readers/llama-index-readers-youtube-transcript/README.md
index cbd593ac41045f218524dd51229c82252256d09e..86333283ffed1a32e38d2fd282a63d18d5a53f70 100644
--- a/llama-index-integrations/readers/llama-index-readers-youtube-transcript/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-youtube-transcript/README.md
@@ -1,5 +1,9 @@
 # Youtube Transcript Loader
 
+```bash
+pip install llama-index-readers-youtube-transcript
+```
+
 This loader fetches the text transcript of Youtube videos using the `youtube_transcript_api` Python package.
 
 ## Usage
@@ -9,7 +15,7 @@ To use this loader, you will need to first `pip install youtube_transcript_api`.
 Then, simply pass an array of YouTube links into `load_data`:
 
 ```python
-from llama_hub.youtube_transcript import YoutubeTranscriptReader
+from llama_index.readers.youtube_transcript import YoutubeTranscriptReader
 
 loader = YoutubeTranscriptReader()
 documents = loader.load_data(
@@ -22,10 +28,10 @@ Supported URL formats: + youtube.com/watch?v={video_id} (with or without 'www.')
 To programmatically check if a URL is supported:
 
 ```python
-from llama_hub.youtube_transcript import is_youtube_video
+from llama_index.readers.youtube_transcript.utils import is_youtube_video
 
 is_youtube_video("https://youtube.com/watch?v=j83jrh2")  # => True
 is_youtube_video("https://vimeo.com/272134160")  # => False
 ```
 
-This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent. See [here](https://github.com/emptycrown/llama-hub/tree/main) for examples.
+This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
diff --git a/llama-index-integrations/readers/llama-index-readers-zendesk/README.md b/llama-index-integrations/readers/llama-index-readers-zendesk/README.md
index 11aeec68fb237d56315128fb2745069d3feb95b4..126da790a33099ee5c128bf0b6cedae9f1d44c35 100644
--- a/llama-index-integrations/readers/llama-index-readers-zendesk/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-zendesk/README.md
@@ -1,5 +1,9 @@
 # Zendesk Loader
 
+```bash
+pip install llama-index-readers-zendesk
+```
+
 This loader fetches the text from Zendesk help articles using the Zendesk API. It also uses the BeautifulSoup library to parse the HTML and extract the text from the articles.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader fetches the text from Zendesk help articles using the Zendesk API. I
 To use this loader, you need to pass in the subdomain of a Zendesk account. No authentication is required. You can also set the locale of articles as needed.
 
 ```python
-from llama_index import download_loader
-
-ZendeskReader = download_loader("ZendeskReader")
+from llama_index.readers.zendesk import ZendeskReader
 
 loader = ZendeskReader(zendesk_subdomain="my_subdomain", locale="en-us")
 documents = loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-zep/README.md b/llama-index-integrations/readers/llama-index-readers-zep/README.md
index dae8826f386e99468d5b06846203947d64ae2696..5b940f9e853b705f42b30c2f162b7cbc3afa22f7 100644
--- a/llama-index-integrations/readers/llama-index-readers-zep/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-zep/README.md
@@ -1,5 +1,9 @@
 # Zep Reader
 
+```bash
+pip install llama-index-readers-zep
+```
+
 The Zep Reader returns a set of texts corresponding to a text query or embeddings retrieved from a Zep Collection.
 The Reader is initialized with a Zep API URL and optionally an API key. The Reader can then be used to load data
 from a Zep Document Collection.
@@ -23,14 +27,13 @@ results.
 import time
 from uuid import uuid4
 
-from llama_index.node_parser import SimpleNodeParser
-from llama_index.readers.schema import Document
+from llama_index.core.node_parser import SimpleNodeParser
+from llama_index.core import Document
 from zep_python import ZepClient
 from zep_python.document import Document as ZepDocument
 
-from llama_index import download_loader
 
-ZepReader = download_loader("ZepReader")
+from llama_index.readers.zep import ZepReader
 
 # Create a Zep collection
 zep_api_url = "http://localhost:8000"  # replace with your Zep API URL
diff --git a/llama-index-integrations/tools/llama-index-tools-arxiv/README.md b/llama-index-integrations/tools/llama-index-tools-arxiv/README.md
index 7faed436c797981d41a0782bb20e9a4bb26f59bb..32b4f0b0c79273db0088d3c878f6389ada0f21e9 100644
--- a/llama-index-integrations/tools/llama-index-tools-arxiv/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-arxiv/README.md
@@ -10,7 +10,7 @@ Here's an example usage of the ArxivToolSpec.
 
 ```python
 from llama_index.tools.arxiv import ArxivToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = ArxivToolSpec()
 
diff --git a/llama-index-integrations/tools/llama-index-tools-azure-cv/README.md b/llama-index-integrations/tools/llama-index-tools-azure-cv/README.md
index d63a39a2020a6711fa816de397ca49c1e1e1cfe2..4005de47f5af229d641c21314ef34dd381359137 100644
--- a/llama-index-integrations/tools/llama-index-tools-azure-cv/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-azure-cv/README.md
@@ -12,7 +12,7 @@ Here's an example usage of the AzureCVToolSpec.
 
 ```python
 from llama_index.tools.azure_cv import AzureCVToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = AzureCVToolSpec(api_key="your-key", resource="your-resource")
 
diff --git a/llama-index-integrations/tools/llama-index-tools-azure-speech/README.md b/llama-index-integrations/tools/llama-index-tools-azure-speech/README.md
index 227278529a138e239a9f444d3cd5f16dd2855c98..d0e573a223a3a31be4fdc7e927f4cff913eb4f87 100644
--- a/llama-index-integrations/tools/llama-index-tools-azure-speech/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-azure-speech/README.md
@@ -8,7 +8,7 @@ This tool has a more extensive example usage documented in a Jupyter notebook [h
 
 ```python
 from llama_index.tools.azure_speech import AzureSpeechToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 speech_tool = AzureSpeechToolSpec(speech_key="your-key", region="eastus")
 
diff --git a/llama-index-integrations/tools/llama-index-tools-azure-translate/README.md b/llama-index-integrations/tools/llama-index-tools-azure-translate/README.md
index 69124ea986f0d2f5c87216cbd1acff08c9d0edd2..ce5554c1b8102564729f31145ac65a5c9f58d8e7 100644
--- a/llama-index-integrations/tools/llama-index-tools-azure-translate/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-azure-translate/README.md
@@ -13,7 +13,7 @@ This tool has a more extensive example usage documented in a Jupyter notebook [h
 Here's an example usage of the AzureTranslateToolSpec.
 
 ```python
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 from llama_index.tools.azure_translate import AzureTranslateToolSpec
 
 translate_tool = AzureTranslateToolSpec(api_key="your-key", region="eastus")
diff --git a/llama-index-integrations/tools/llama-index-tools-bing-search/README.md b/llama-index-integrations/tools/llama-index-tools-bing-search/README.md
index 762c18eecd7be39ba7e867eabd5af66c308dd561..d23c9f8773c6ed99a1ef32b30ed1f92b8364fae3 100644
--- a/llama-index-integrations/tools/llama-index-tools-bing-search/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-bing-search/README.md
@@ -12,7 +12,7 @@ Here's an example usage of the BingSearchToolSpec.
 
 ```python
 from llama_index.tools.bing_search import BingSearchToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = BingSearchToolSpec(api_key="your-key")
 
diff --git a/llama-index-integrations/tools/llama-index-tools-chatgpt-plugin/README.md b/llama-index-integrations/tools/llama-index-tools-chatgpt-plugin/README.md
index 6e607721802c938e3bb7c13794cfd3ddd72a3b25..e17ade24dd852db468f46bf98ac63c5542676479 100644
--- a/llama-index-integrations/tools/llama-index-tools-chatgpt-plugin/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-chatgpt-plugin/README.md
@@ -17,7 +17,7 @@ f = requests.get(
 manifest = yaml.safe_load(f)
 
 from llama_index.tools.chatgpt_plugin import ChatGPTPluginToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 from llama_index.tools.requests import RequestsToolSpec
 
 requests_spec = RequestsToolSpec()
diff --git a/llama-index-integrations/tools/llama-index-tools-code-interpreter/README.md b/llama-index-integrations/tools/llama-index-tools-code-interpreter/README.md
index 879fb398d942e11ee293b55f23582150dc0f8363..82bf5353c6056d6266b738f18028f19788b8e97b 100644
--- a/llama-index-integrations/tools/llama-index-tools-code-interpreter/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-code-interpreter/README.md
@@ -12,7 +12,7 @@ Here's an example usage of the CodeInterpreterToolSpec.
 
 ```python
 from llama_index.tools.code_interpreter import CodeInterpreterToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 code_spec = CodeInterpreterToolSpec()
 
diff --git a/llama-index-integrations/tools/llama-index-tools-cogniswitch/README.md b/llama-index-integrations/tools/llama-index-tools-cogniswitch/README.md
index deb89d56e1aff2b761c9999e24a7e4e118beac12..e90f35f43cb129b0f0d921369cfb111f697da88f 100644
--- a/llama-index-integrations/tools/llama-index-tools-cogniswitch/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-cogniswitch/README.md
@@ -36,7 +36,7 @@ import warnings
 warnings.filterwarnings("ignore")
 import os
 from llama_index.tools.cogniswitch import CogniswitchToolSpec
-from llama_index.agent import ReActAgent
+from llama_index.core.agent import ReActAgent
 ```
 
 ### Cogniswitch Credentials and OpenAI token
diff --git a/llama-index-integrations/tools/llama-index-tools-database/README.md b/llama-index-integrations/tools/llama-index-tools-database/README.md
index 28d91a29c4b0104c3ee99dbf89b4ff92ef8c6876..e7e7e84257a839baa71cd77c70abb6be8fc977c7 100644
--- a/llama-index-integrations/tools/llama-index-tools-database/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-database/README.md
@@ -10,7 +10,7 @@ Here's an example usage of the DatabaseToolSpec.
 
 ```python
 from llama_index.tools.database import DatabaseToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 db_tools = DatabaseToolSpec(
     scheme="postgresql",  # Database Scheme
diff --git a/llama-index-integrations/tools/llama-index-tools-duckduckgo/README.md b/llama-index-integrations/tools/llama-index-tools-duckduckgo/README.md
index 81cea3fd91fa7c7fcd2d7c02c99bcda0da818fe3..2eabbc8288d8714a18dde0c31686df09205d361c 100644
--- a/llama-index-integrations/tools/llama-index-tools-duckduckgo/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-duckduckgo/README.md
@@ -10,7 +10,7 @@ Here's an example usage of the DuckDuckGoSearchToolSpec.
 
 ```python
 from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = DuckDuckGoSearchToolSpec()
 
diff --git a/llama-index-integrations/tools/llama-index-tools-exa/README.md b/llama-index-integrations/tools/llama-index-tools-exa/README.md
index c1bbf5d78588bcff730fec5e97dcd2fbb0ec1fb1..0bd2e13572172e4f81a48c2c44c64af466944dbf 100644
--- a/llama-index-integrations/tools/llama-index-tools-exa/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-exa/README.md
@@ -13,9 +13,7 @@ Here's an example usage of the ExaToolSpec.
 
 ```python
 from llama_index.tools.exa import ExaToolSpec
-from llama_index.agent.openai import (
-    OpenAIAgent,
-)  # requires llama-index-agent-openai
+from llama_index.agent.openai import OpenAIAgent
 
 exa_tool = ExaToolSpec(
     api_key="your-key",
diff --git a/llama-index-integrations/tools/llama-index-tools-graphql/README.md b/llama-index-integrations/tools/llama-index-tools-graphql/README.md
index 669e3638ff08af6ca684249fd009c04a2dd2a809..7924ba3637a163c246209db500ecab7bf84c8ba5 100644
--- a/llama-index-integrations/tools/llama-index-tools-graphql/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-graphql/README.md
@@ -12,7 +12,7 @@ This tool works best when the Agent has access to the GraphQL schema for the ser
 
 ```python
 from llama_index.tools.graphql import GraphQLToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = GraphQLToolSpec(
     url="https://spacex-production.up.railway.app/",
diff --git a/llama-index-integrations/tools/llama-index-tools-ionic-shopping/README.md b/llama-index-integrations/tools/llama-index-tools-ionic-shopping/README.md
index c4cf64e448b2948017dced815fcaa06cbbe93a7f..57670a76558f32618f2aa62545a9ffc4829db775 100644
--- a/llama-index-integrations/tools/llama-index-tools-ionic-shopping/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-ionic-shopping/README.md
@@ -1,5 +1,9 @@
 # LlamaIndex Tools Integration: Ionic Shopping
 
+```bash
+pip install llama-index-tools-ionic-shopping
+```
+
 [Ionic](https://ioniccommerce.com) is a plug and play ecommerce marketplace for AI Assistants.
 By including the Ionic Tool in your agent, you are effortlessly providing your users with the ability
 to shop and transact directly within your agent, and you’ll get a cut of the transaction.
@@ -10,7 +14,7 @@ Llearn more about how [Ionic attributes sales](https://docs.ioniccommerce.com/gu
 to your agent. Provide your Ionic API Key when instantiating the tool:
 
 ```python
-from llama_hub.tools.ionic_shopping.base import IonicShoppingToolSpec
+from llama_index.tools.ionic_shopping import IonicShoppingToolSpec
 
 ionic_tool = IonicShoppingToolSpec(api_key="<my Ionic API Key>").to_tool_list()
 ```
@@ -21,8 +25,10 @@ Try it out using the [Jupyter notebook](https://github.com/run-llama/llama-hub/b
 
 ```python
 import openai
-from llama_index.agent import OpenAIAgent  # requires llama-index-agent-openai
-from llama_hub.tools.ionic_shopping.base import IonicShoppingToolSpec
+from llama_index.agent.openai import (
+    OpenAIAgent,
+)  # requires llama-index-agent-openai
+from llama_index.tools.ionic_shopping import IonicShoppingToolSpec
 
 openai.api_key = "sk-api-key"
 
diff --git a/llama-index-integrations/tools/llama-index-tools-metaphor/README.md b/llama-index-integrations/tools/llama-index-tools-metaphor/README.md
index 067f5964aece5a80decdd6d1f08f53fa934ec333..21f0d0514dc5dfec529456570923dea682890bcf 100644
--- a/llama-index-integrations/tools/llama-index-tools-metaphor/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-metaphor/README.md
@@ -19,7 +19,7 @@ Here's an example usage of the MetaphorToolSpec.
 
 ```python
 from llama_index.tools.metaphor import MetaphorToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 metaphor_tool = MetaphorToolSpec(
     api_key="your-key",
diff --git a/llama-index-integrations/tools/llama-index-tools-multion/README.md b/llama-index-integrations/tools/llama-index-tools-multion/README.md
index 9788c99f3c1aaa97faa8139b3b038f9024ab155d..5eb221b6d3560bf7865fd3c91d60ad50ca383569 100644
--- a/llama-index-integrations/tools/llama-index-tools-multion/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-multion/README.md
@@ -1,5 +1,9 @@
 # MultiOn Tool
 
+```bash
+pip install llama-index-tools-multion
+```
+
 This tool connects to [MultiOn](https://www.multion.ai/) to enable your agent to easily
 connect to the internet through your Chrome Web browser and act on your behalf
 
@@ -13,8 +17,8 @@ This tool has more a extensive example usage documented in a Jupyter notebook [h
 Here's an example usage of the MultionToolSpec.
 
 ```python
-from llama_index.tools.metaphor import MultionToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.tools.multion import MultionToolSpec
+from llama_index.agent.openai import OpenAIAgent
 
 multion_tool = MultionToolSpec()
 
diff --git a/llama-index-integrations/tools/llama-index-tools-neo4j/README.md b/llama-index-integrations/tools/llama-index-tools-neo4j/README.md
index 1f30d99f8bc104c92cae2d423d625118376935b1..9ae5a9c10b4f432e3bdada1bb6563daa27e9fb5f 100644
--- a/llama-index-integrations/tools/llama-index-tools-neo4j/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-neo4j/README.md
@@ -1,5 +1,9 @@
 # Neo4j Schema Query Builder
 
+```bash
+pip install llama-index-tools-neo4j
+```
+
 The `Neo4jQueryToolSpec` class provides a way to query a Neo4j graph database based on a provided schema definition. The class uses a language model to generate Cypher queries from user questions and has the capability to recover from Cypher syntax errors through a self-healing mechanism.
 
 ## Table of Contents
@@ -16,9 +20,9 @@ The `Neo4jQueryToolSpec` class provides a way to query a Neo4j graph database ba
 Initialize the `Neo4jQueryToolSpec` class with:
 
 ```python
-from llama_index.tools.neo4j_db import Neo4jQueryToolSpec
-from llama_index.llms import OpenAI
-from llama_index.agent import OpenAIAgent
+from llama_index.tools.neo4j import Neo4jQueryToolSpec
+from llama_index.llms.openai import OpenAI
+from llama_index.agent.openai import OpenAIAgent
 
 llm = OpenAI(model="gpt-4", openai_api_key="XXXX-XXXX", temperature=0)
 
diff --git a/llama-index-integrations/tools/llama-index-tools-notion/README.md b/llama-index-integrations/tools/llama-index-tools-notion/README.md
index 9b574669358fd0818ea0733771f7a40475d846f5..6945e6ad9e458767997ac4b9abe59c1d221e0f08 100644
--- a/llama-index-integrations/tools/llama-index-tools-notion/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-notion/README.md
@@ -10,7 +10,7 @@ Here's an example usage of the NotionToolSpec.
 
 ```python
 from llama_index.tools.notion import NotionToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = NotionToolSpec()
 
diff --git a/llama-index-integrations/tools/llama-index-tools-openai/README.md b/llama-index-integrations/tools/llama-index-tools-openai/README.md
index f91e6ad6d7b63c13415279d9117b88e5e8fc93bf..fb1c3367fcbe5df85b1233fd828c2daaeb8d280c 100644
--- a/llama-index-integrations/tools/llama-index-tools-openai/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-openai/README.md
@@ -9,9 +9,7 @@ This tool has a more extensive example usage documented in a Jupyter notebook [h
 ### Usage with Agent
 
 ```python
-from llama_index.tools.openai.image_generation import (
-    OpenAIImageGenerationToolSpec,
-)
+from llama_index.tools.openai import OpenAIImageGenerationToolSpec
 
 image_generation_tool = OpenAIImageGenerationToolSpec(
     api_key=os.environ["OPENAI_API_KEY"]
@@ -32,9 +30,7 @@ print(response)
 ### Usage directly
 
 ```python
-from llama_index.tools.openai.image_generation import (
-    OpenAIImageGenerationToolSpec,
-)
+from llama_index.tools.openai import OpenAIImageGenerationToolSpec
 
 image_generation_tool = OpenAIImageGenerationToolSpec(
     api_key=os.environ["OPENAI_API_KEY"]
diff --git a/llama-index-integrations/tools/llama-index-tools-openapi/README.md b/llama-index-integrations/tools/llama-index-tools-openapi/README.md
index c010ad25e12600f233009f91330589f3450fa70b..58bda6d03d99a561bb3c642c02b8d9bfa18cf661 100644
--- a/llama-index-integrations/tools/llama-index-tools-openapi/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-openapi/README.md
@@ -1,5 +1,9 @@
 # OpenAPI Tool
 
+```bash
+pip install llama-index-tools-openapi
+```
+
 This tool loads an OpenAPI spec and allow the Agent to retrieve endpoints and details about endpoints. The RequestsToolSpec can also be loaded into the agent to allow the agent to hit the necessary endpoints with a REST request.
 
 ## Usage
@@ -9,8 +13,8 @@ This tool has more extensive example usage documented in a Jupyter notebook [her
 Here's an example usage of the OpenAPIToolSpec.
 
 ```python
-from llama_hub.tools.openapi import OpenAPIToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.tools.openapi import OpenAPIToolSpec
+from llama_index.agent.openai import OpenAIAgent
 
 f = requests.get(
     "https://raw.githubusercontent.com/APIs-guru/openapi-directory/main/APIs/openai.com/1.2.0/openapi.yaml"
diff --git a/llama-index-integrations/tools/llama-index-tools-playgrounds/README.md b/llama-index-integrations/tools/llama-index-tools-playgrounds/README.md
index 084401a3da10ab129caf165c2344c5e435f728e9..10e41c325aeb3339177d8b717e4b158b35db9d83 100644
--- a/llama-index-integrations/tools/llama-index-tools-playgrounds/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-playgrounds/README.md
@@ -2,6 +2,10 @@
 
 ## playgrounds_subgraph_connector
 
+```bash
+pip install llama-index-tools-playgrounds
+```
+
 Playgrounds API is a service provided by [Playgrounds Analytics](https://playgrounds.network) to streamline interfacing with decentralized subgraphs (indexed blockchain datasets).
 
 The `PlaygroundsSubgraphConnector` is a tool designed for LLM agents to seamlessly interface with and query subgraphs on The Graph's decentralized network via Playgrounds API.
@@ -23,10 +27,8 @@ To utilize the tool, simply initialize it with the appropriate `identifier` (Sub
 
 ```python
 import openai
-from llama_index.agent import OpenAIAgent
-from llama_index.tools.playgrounds_subgraph_connector import (
-    PlaygroundsSubgraphConnectorToolSpec,
-)
+from llama_index.agent.openai import OpenAIAgent
+from llama_index.tools.playgrounds import PlaygroundsSubgraphConnectorToolSpec
 
 
 def simple_test():
@@ -85,10 +87,8 @@ To utilize the tool, initialize it with the appropriate `identifier` (Subgraph I
 
 ```python
 import openai
-from llama_index.agent import OpenAIAgent
-from llama_index.tools.playgrounds_subgraph_inspector import (
-    PlaygroundsSubgraphInspectorToolSpec,
-)
+from llama_index.agent.openai import OpenAIAgent
+from llama_index.tools.playgrounds import PlaygroundsSubgraphInspectorToolSpec
 
 
 def inspect_subgraph(
diff --git a/llama-index-integrations/tools/llama-index-tools-python-file/README.md b/llama-index-integrations/tools/llama-index-tools-python-file/README.md
index bb21b84c7705ea6e91237c1439f02f774c6eab82..424ed6212d2f56da45c2c6e0747d604b2461a73c 100644
--- a/llama-index-integrations/tools/llama-index-tools-python-file/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-python-file/README.md
@@ -10,7 +10,7 @@ Here's an example usage of the PythonFileToolSpec.
 
 ```python
 from llama_index.tools.python_file import PythonFileToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 pyfile = PythonFileToolSpec("./numpy_linalg.py")
 
diff --git a/llama-index-integrations/tools/llama-index-tools-requests/README.md b/llama-index-integrations/tools/llama-index-tools-requests/README.md
index ae2b99dc1a7e15a7d63a0b56a3ce8e386131973b..541f772c584d3aed66142f1a1ca078e8c6a899a0 100644
--- a/llama-index-integrations/tools/llama-index-tools-requests/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-requests/README.md
@@ -12,7 +12,7 @@ Here's an example usage of the RequestsToolSpec.
 
 ```python
 from llama_index.tools.requests import RequestsToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 domain_headers = {
     "api.openai.com": {
diff --git a/llama-index-integrations/tools/llama-index-tools-shopify/README.md b/llama-index-integrations/tools/llama-index-tools-shopify/README.md
index 32f530f504e77f710d5dc57381457d798702f3ab..c316c2f41f5f41f7b45fac686fccd9d6eb9d21d2 100644
--- a/llama-index-integrations/tools/llama-index-tools-shopify/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-shopify/README.md
@@ -8,12 +8,16 @@ This tool has more extensive example usage documented in a Jupyter notebook [her
 
 In particular, the tool is very effective when combined with a method of retrieving data from the GraphQL schema definition.
 
+```bash
+pip install llama-index llama-index-readers-file llama-index-tools-shopify unstructured
+```
+
 ```python
 from llama_index.tools.shopify import ShopifyToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
-from llama_index.file.unstructured import UnstructuredReader
-from llama_index.tools.ondemand_loader_tool import OnDemandLoaderTool
+from llama_index.readers.file import UnstructuredReader
+from llama_index.core.tools.ondemand_loader_tool import OnDemandLoaderTool
 
 documentation_tool = OnDemandLoaderTool.from_defaults(
     UnstructuredReader(),
diff --git a/llama-index-integrations/tools/llama-index-tools-slack/README.md b/llama-index-integrations/tools/llama-index-tools-slack/README.md
index 50eef495a82272d91ee301937d86cb4447ed03eb..77a5fc2fdd4c95a28c2c0f075db4c5b7f0d14aad 100644
--- a/llama-index-integrations/tools/llama-index-tools-slack/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-slack/README.md
@@ -6,7 +6,7 @@ This tool fetches the text from a list of Slack channels. You will need to initi
 
 ```python
 from llama_index.tools.slack import SlackToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = SlackToolSpec(slack_token="token")
 
diff --git a/llama-index-integrations/tools/llama-index-tools-tavily-research/README.md b/llama-index-integrations/tools/llama-index-tools-tavily-research/README.md
index 36ed8c5aee8a2fdaa1d2f3670daaf5ef30aba325..60a1bc317be0f2cf65b7adbe305df26ea8ea9e82 100644
--- a/llama-index-integrations/tools/llama-index-tools-tavily-research/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-tavily-research/README.md
@@ -20,7 +20,7 @@ Here's an example usage of the TavilyToolSpec.
 
 ```python
 from llama_index.tools.tavily_research import TavilyToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tavily_tool = TavilyToolSpec(
     api_key="your-key",
diff --git a/llama-index-integrations/tools/llama-index-tools-text-to-image/README.md b/llama-index-integrations/tools/llama-index-tools-text-to-image/README.md
index 32c697453d579d9e632843a15db8734eeb1212ec..5c6a0109012359370b0275ec0ba67a3d6f85120e 100644
--- a/llama-index-integrations/tools/llama-index-tools-text-to-image/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-text-to-image/README.md
@@ -10,7 +10,7 @@ Another example showcases retrieval augmentation over a knowledge corpus with te
 
 ```python
 from llama_index.tools.text_to_image import TextToImageToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 openai.api_key = "sk-your-key"
 tool_spec = TextToImageToolSpec()
diff --git a/llama-index-integrations/tools/llama-index-tools-vector-db/README.md b/llama-index-integrations/tools/llama-index-tools-vector-db/README.md
index d7a7140a7f96f172543923e7823749cdfc5c5950..69f377837f590e9e23f30085486eb07bf606f54c 100644
--- a/llama-index-integrations/tools/llama-index-tools-vector-db/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-vector-db/README.md
@@ -6,9 +6,9 @@ This tool wraps a VectorStoreIndex and enables a agent to call it with queries a
 
 ```python
 from llama_index.tools.vector_db import VectorDB
-from llama_index.agent import OpenAIAgent
-from llama_index.vector_stores.types import VectorStoreInfo
-from llama_index import VectorStoreIndex
+from llama_index.agent.openai import OpenAIAgent
+from llama_index.core.vector_stores import VectorStoreInfo
+from llama_index.core import VectorStoreIndex
 
 index = VectorStoreIndex(nodes=nodes)
 tool_spec = VectorDB(index=index)
diff --git a/llama-index-integrations/tools/llama-index-tools-waii/README.md b/llama-index-integrations/tools/llama-index-tools-waii/README.md
index f1b5c6ca291ad48686776b2294e671eddeaa15ea..1da3f828130846c679840d0b399ce21fa44f9b73 100644
--- a/llama-index-integrations/tools/llama-index-tools-waii/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-waii/README.md
@@ -52,8 +52,8 @@ print(index.query("Which table contains most columns?"))
 #### Initialize the agent:
 
 ```python
-from llama_index.agent import OpenAIAgent
-from llama_index.llms import OpenAI
+from llama_index.agent.openai import OpenAIAgent
+from llama_index.llms.openai import OpenAI
 
 agent = OpenAIAgent.from_tools(
     waii_tool.to_tool_list(), llm=OpenAI(model="gpt-4-1106-preview")
diff --git a/llama-index-integrations/tools/llama-index-tools-weather/README.md b/llama-index-integrations/tools/llama-index-tools-weather/README.md
index 251f6745664500b07cc2c0cac729136641ac3923..434a965cca14ef70c159a55705f6111adb5d109d 100644
--- a/llama-index-integrations/tools/llama-index-tools-weather/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-weather/README.md
@@ -13,7 +13,7 @@ Here's an example usage of the OpenWeatherMapToolSpec.
 
 ```python
 from llama_index.tools.weather import OpenWeatherMapToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = OpenWeatherMapToolSpec(key="...")
 
diff --git a/llama-index-integrations/tools/llama-index-tools-wikipedia/README.md b/llama-index-integrations/tools/llama-index-tools-wikipedia/README.md
index 0b6915d30da48385bf86fcaf29f54590251cb3f0..9c8e83aa66f0b517809ec71552762c259044495f 100644
--- a/llama-index-integrations/tools/llama-index-tools-wikipedia/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-wikipedia/README.md
@@ -8,7 +8,7 @@ This tool has more extensive example usage documented in a Jupyter notebook [her
 
 ```python
 from llama_index.tools.wikipedia import WikipediaToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = WikipediaToolSpec()
 
diff --git a/llama-index-integrations/tools/llama-index-tools-wolfram-alpha/README.md b/llama-index-integrations/tools/llama-index-tools-wolfram-alpha/README.md
index d7dc202b317fb8dae24dca92a7fe13a890e79581..c6619a7015cae7e967350ed491a4abbddb3682f9 100644
--- a/llama-index-integrations/tools/llama-index-tools-wolfram-alpha/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-wolfram-alpha/README.md
@@ -12,7 +12,7 @@ Here's an example usage of the WolframAlphaToolSpec.
 
 ```python
 from llama_index.tools.wolfram_alpha import WolframAlphaToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 
 wolfram_spec = WolframAlphaToolSpec(app_id="API-key")
diff --git a/llama-index-integrations/tools/llama-index-tools-yahoo-finance/README.md b/llama-index-integrations/tools/llama-index-tools-yahoo-finance/README.md
index 21779bb953c08e5d2045388439b8f5d57a457bf4..b2c3a4ef84692d403412a56f9768b0f286aaea20 100644
--- a/llama-index-integrations/tools/llama-index-tools-yahoo-finance/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-yahoo-finance/README.md
@@ -8,7 +8,7 @@ Here's an example of how to use this tool:
 
 ```python
 from llama_index.tools.yahoo_finance import YahooFinanceToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = YahooFinanceToolSpec()
 agent = OpenAIAgent.from_tools(tool_spec.to_tool_list())
diff --git a/llama-index-integrations/tools/llama-index-tools-zapier/README.md b/llama-index-integrations/tools/llama-index-tools-zapier/README.md
index d26c068576aa8e9e798c5836cf306ae33ec329ca..cb3060d8299e536fcf6249352528d4b0eaef901f 100644
--- a/llama-index-integrations/tools/llama-index-tools-zapier/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-zapier/README.md
@@ -10,7 +10,7 @@ Here's an example usage of the ZapierToolSpec.
 
 ```python
 from llama_index.tools.zapier import ZapierToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 
 zapier_spec = ZapierToolSpec(api_key="sk-ak-your-key")
diff --git a/llama-index-packs/llama-index-packs-chroma-autoretrieval/README.md b/llama-index-packs/llama-index-packs-chroma-autoretrieval/README.md
index 7f70de593e16d315294acaa49136c8c3faa739f9..dc46644c7ba66f5a7e75e58df5dac792e48e58a2 100644
--- a/llama-index-packs/llama-index-packs-chroma-autoretrieval/README.md
+++ b/llama-index-packs/llama-index-packs-chroma-autoretrieval/README.md
@@ -31,7 +31,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 
 vector_store_info = VectorStoreInfo(
     content_info="brief biography of celebrities",
diff --git a/llama-index-packs/llama-index-packs-cogniswitch-agent/README.md b/llama-index-packs/llama-index-packs-cogniswitch-agent/README.md
index 476ee35eb431ccf500f6e8db7a97d0353dc43817..0723ca6d75c418b01f899c05caca98ef8f150dbc 100644
--- a/llama-index-packs/llama-index-packs-cogniswitch-agent/README.md
+++ b/llama-index-packs/llama-index-packs-cogniswitch-agent/README.md
@@ -42,7 +42,7 @@ llamaindex-cli download-llamapack CogniswitchAgentPack --download-dir ./cs_pack
 import warnings
 
 warnings.filterwarnings("ignore")
-from llama_index.core.llama_packs import CogniswitchAgentPack
+from llama_index.packs.cogniswitch_agent import CogniswitchAgentPack
 import os
 
 
diff --git a/llama-index-packs/llama-index-packs-deeplake-deepmemory-retriever/README.md b/llama-index-packs/llama-index-packs-deeplake-deepmemory-retriever/README.md
index 7546a55058a75a62ed12b9569eb626baa52bf2cc..2be08da526be617bc845ba0e5cbf26e6f428e8e3 100644
--- a/llama-index-packs/llama-index-packs-deeplake-deepmemory-retriever/README.md
+++ b/llama-index-packs/llama-index-packs-deeplake-deepmemory-retriever/README.md
@@ -17,7 +17,7 @@ You can then inspect the files at `./deepmemory_pack` and use them as a template
 You can download the pack to a `./deepmemory_pack` directory:
 
 ```python
-from llama_hub.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 DeepMemoryRetriever = download_llama_pack(
@@ -31,7 +31,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 
 nodes = [...]
 
diff --git a/llama-index-packs/llama-index-packs-deeplake-multimodal-retrieval/README.md b/llama-index-packs/llama-index-packs-deeplake-multimodal-retrieval/README.md
index 50427fe9d3d1664b8aec3f43f24eccdc817d2d06..72fc3f39107024b350dd93869df27a4845be161d 100644
--- a/llama-index-packs/llama-index-packs-deeplake-multimodal-retrieval/README.md
+++ b/llama-index-packs/llama-index-packs-deeplake-multimodal-retrieval/README.md
@@ -17,7 +17,7 @@ You can then inspect the files at `./deeplake_multimodal_pack` and use them as a
 You can download the pack to a `./deeplake_multimodal_pack` directory:
 
 ```python
-from llama_hub.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 DeepLakeMultimodalRetriever = download_llama_pack(
@@ -31,7 +31,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 
 # collection of image and text nodes
 nodes = [...]
diff --git a/llama-index-packs/llama-index-packs-dense-x-retrieval/README.md b/llama-index-packs/llama-index-packs-dense-x-retrieval/README.md
index 4219bb448993150cc4bc991d020e4210f2de2abd..0b8bf6c6ae2a03c1e31bd537f00bfe8d14e887ac 100644
--- a/llama-index-packs/llama-index-packs-dense-x-retrieval/README.md
+++ b/llama-index-packs/llama-index-packs-dense-x-retrieval/README.md
@@ -29,7 +29,7 @@ You can then inspect the files at `./dense_pack` and use them as a template for
-You can download the pack to a the `./dense_pack` directory:
+You can download the pack to the `./dense_pack` directory:
 
 ```python
-from llama_index import SimpleDirectoryReader
+from llama_index.core import SimpleDirectoryReader
 from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
diff --git a/llama-index-packs/llama-index-packs-evaluator-benchmarker/README.md b/llama-index-packs/llama-index-packs-evaluator-benchmarker/README.md
index 3fff5c2aca9e18804a67aca5d60d2fb789660b7a..ba1ea63b00cd7d11c1381f7db44a149b8d4415f9 100644
--- a/llama-index-packs/llama-index-packs-evaluator-benchmarker/README.md
+++ b/llama-index-packs/llama-index-packs-evaluator-benchmarker/README.md
@@ -32,8 +32,8 @@ single-grading evaluation — in this case, the usage flow remains the same.
 from llama_index.core.llama_dataset import download_llama_dataset
 from llama_index.core.llama_pack import download_llama_pack
 from llama_index.core.evaluation import PairwiseComparisonEvaluator
-from llama_index.core.llms import OpenAI
-from llama_index import ServiceContext
+from llama_index.llms.openai import OpenAI
+from llama_index.core import ServiceContext
 
 # download a LabelledRagDataset from llama-hub
 pairwise_dataset = download_llama_dataset(
diff --git a/llama-index-packs/llama-index-packs-fuzzy-citation/README.md b/llama-index-packs/llama-index-packs-fuzzy-citation/README.md
index edb6f077ac1dc7e5cecd1e453336fe23f289a429..4b90e932aafa68f60f8a109181dceb7f79a1287e 100644
--- a/llama-index-packs/llama-index-packs-fuzzy-citation/README.md
+++ b/llama-index-packs/llama-index-packs-fuzzy-citation/README.md
@@ -21,7 +21,7 @@ You can then inspect the files at `./fuzzy_citation_pack` and use them as a temp
-You can download the pack to a the `./fuzzy_citation_pack` directory:
+You can download the pack to the `./fuzzy_citation_pack` directory:
 
 ```python
-from llama_index import Document, VectorStoreIndex
+from llama_index.core import Document, VectorStoreIndex
 from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
diff --git a/llama-index-packs/llama-index-packs-gmail-openai-agent/README.md b/llama-index-packs/llama-index-packs-gmail-openai-agent/README.md
index a8cd8cec53bbd4a7f80447bedeacdf4d638d7201..1e13d1329d6a228b030c1856fa91ab2ccd98433f 100644
--- a/llama-index-packs/llama-index-packs-gmail-openai-agent/README.md
+++ b/llama-index-packs/llama-index-packs-gmail-openai-agent/README.md
@@ -43,7 +43,7 @@ agent = gmail_agent_pack.agent
 response = agent.chat("What is my most recent email?")
 
 # Use the tool spec in another agent
-from llama_index.core.agents import ReActAgent
+from llama_index.core.agent import ReActAgent
 
 tool_spec = gmail_agent_pack.tool_spec
-agent = ReActAgent.from_tools(tool_spec.to_tool_lost())
+agent = ReActAgent.from_tools(tool_spec.to_tool_list())
diff --git a/llama-index-packs/llama-index-packs-koda-retriever/README.md b/llama-index-packs/llama-index-packs-koda-retriever/README.md
index 582d67388a106fdc7365191788db359d7234eb7f..c703e1467de2f8781675c8a9f8093c28758b5dbe 100644
--- a/llama-index-packs/llama-index-packs-koda-retriever/README.md
+++ b/llama-index-packs/llama-index-packs-koda-retriever/README.md
@@ -29,7 +29,7 @@ Please see the [examples](./examples/) folder for more specific examples.
 from llama_index.packs.koda_retriever import KodaRetriever
 from llama_index.core import VectorStoreIndex
 from llama_index.llms.openai import OpenAI
-from llama_index.embeddings.openai.base import OpenAIEmbedding
+from llama_index.embeddings.openai import OpenAIEmbedding
 from llama_index.core.postprocessor import LLMRerank
 from llama_index.core import Settings
 
diff --git a/llama-index-packs/llama-index-packs-multidoc-autoretrieval/README.md b/llama-index-packs/llama-index-packs-multidoc-autoretrieval/README.md
index d8250aa7652d56f6dc45ce750473074c86d0b235..f5ddbcdd8dcc236009d33ee139100844bf75134f 100644
--- a/llama-index-packs/llama-index-packs-multidoc-autoretrieval/README.md
+++ b/llama-index-packs/llama-index-packs-multidoc-autoretrieval/README.md
@@ -31,7 +31,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 
 import weaviate
 
diff --git a/llama-index-packs/llama-index-packs-nebulagraph-query-engine/README.md b/llama-index-packs/llama-index-packs-nebulagraph-query-engine/README.md
index a776ac5c2c001820f013b570dd0a752e5724edfd..5b3538fda58c99a55ba28980de837e57425e7679 100644
--- a/llama-index-packs/llama-index-packs-nebulagraph-query-engine/README.md
+++ b/llama-index-packs/llama-index-packs-nebulagraph-query-engine/README.md
@@ -37,11 +37,15 @@ From here, you can use the pack, or inspect and modify the pack in `./nebulagrap
 
 Then, you can set up the pack like so:
 
+```bash
+pip install llama-index-readers-wikipedia
+```
+
 ```python
 # Load the docs (example of Paleo diet from Wikipedia)
-from llama_index import download_loader
 
-WikipediaReader = download_loader("WikipediaReader")
+from llama_index.readers.wikipedia import WikipediaReader
+
 loader = WikipediaReader()
 docs = loader.load_data(pages=["Paleolithic diet"], auto_suggest=False)
 print(f"Loaded {len(docs)} documents")
@@ -75,7 +79,7 @@ nebulagraph_pack = NebulaGraphQueryEnginePack(
 Optionally, you can pass in the `query_engine_type` from `NebulaGraphQueryEngineType` to construct `NebulaGraphQueryEnginePack`. If `query_engine_type` is not defined, it defaults to Knowledge Graph vector based entity retrieval.
 
 ```python
 from llama_index.packs.nebulagraph_query_engine.base import (
     NebulaGraphQueryEngineType,
 )
 
diff --git a/llama-index-packs/llama-index-packs-neo4j-query-engine/README.md b/llama-index-packs/llama-index-packs-neo4j-query-engine/README.md
index c508cc743a6d3b15acd743ffb97cfa14ad241b5f..2582fe676a260686d83c7702b066d7e0efc2dc6f 100644
--- a/llama-index-packs/llama-index-packs-neo4j-query-engine/README.md
+++ b/llama-index-packs/llama-index-packs-neo4j-query-engine/README.md
@@ -37,11 +37,15 @@ From here, you can use the pack, or inspect and modify the pack in `./neo4j_pack
 
 Then, you can set up the pack like so:
 
+```bash
+pip install llama-index-readers-wikipedia
+```
+
 ```python
 # Load the docs (example of Paleo diet from Wikipedia)
-from llama_index import download_loader
 
-WikipediaReader = download_loader("WikipediaReader")
+from llama_index.readers.wikipedia import WikipediaReader
+
 loader = WikipediaReader()
 docs = loader.load_data(pages=["Paleolithic diet"], auto_suggest=False)
 print(f"Loaded {len(docs)} documents")
@@ -63,7 +67,7 @@ neo4j_pack = Neo4jQueryEnginePack(
 Optionally, you can pass in the `query_engine_type` from `Neo4jQueryEngineType` to construct `Neo4jQueryEnginePack`. If `query_engine_type` is not defined, it defaults to Knowledge Graph vector based entity retrieval.
 
 ```python
 from llama_index.packs.neo4j_query_engine.base import Neo4jQueryEngineType
 
 # create the pack
 neo4j_pack = Neo4jQueryEnginePack(
diff --git a/llama-index-packs/llama-index-packs-rag-cli-local/README.md b/llama-index-packs/llama-index-packs-rag-cli-local/README.md
index 90352835e6f1e00035c168412e3dc7696d273c4d..ee3ba4a0d2eab647a4a99f3c995f3f92109f56a0 100644
--- a/llama-index-packs/llama-index-packs-rag-cli-local/README.md
+++ b/llama-index-packs/llama-index-packs-rag-cli-local/README.md
@@ -21,7 +21,7 @@ which makes it hard to load directly.
 We will show you how to import the agent from these files!
 
 ```python
-from llama_index.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 download_llama_pack("LocalRAGCLIPack", "./local_rag_cli_pack", skip_load=True)
diff --git a/llama-index-packs/llama-index-packs-rag-evaluator/README.md b/llama-index-packs/llama-index-packs-rag-evaluator/README.md
index abe136c89642ae095131b027c78393a1aa4890f5..d5a6d405f75f82112c4260fccadeed40d10b5072 100644
--- a/llama-index-packs/llama-index-packs-rag-evaluator/README.md
+++ b/llama-index-packs/llama-index-packs-rag-evaluator/README.md
@@ -25,7 +25,7 @@ built off of its source documents.
 ```python
 from llama_index.core.llama_dataset import download_llama_dataset
 from llama_index.core.llama_pack import download_llama_pack
-from llama_index import VectorStoreIndex
+from llama_index.core import VectorStoreIndex
 
 # download a LabelledRagDataset from llama-hub
 rag_dataset, documents = download_llama_dataset(
diff --git a/llama-index-packs/llama-index-packs-redis-ingestion-pipeline/README.md b/llama-index-packs/llama-index-packs-redis-ingestion-pipeline/README.md
index 6c4dc6de2f12f0526e2cc636e308913640f843ea..cbe2a65e5148ca280bf707c80ea61927782b7e4e 100644
--- a/llama-index-packs/llama-index-packs-redis-ingestion-pipeline/README.md
+++ b/llama-index-packs/llama-index-packs-redis-ingestion-pipeline/README.md
@@ -30,8 +30,8 @@ From here, you can use the pack, or inspect and modify the pack in `./redis_inge
 Then, you can set up the pack like so:
 
 ```python
-from llama_index.core.text_splitter import SentenceSplitter
-from llama_index.core.embeddings import OpenAIEmbedding
+from llama_index.core.node_parser import SentenceSplitter
+from llama_index.embeddings.openai import OpenAIEmbedding
 
 transformations = [SentenceSplitter(), OpenAIEmbedding()]
 
diff --git a/llama-index-packs/llama-index-packs-retry-engine-weaviate/README.md b/llama-index-packs/llama-index-packs-retry-engine-weaviate/README.md
index 345ca13440b3a51da1bcb8357a7961ef8846d338..5f88ce6597dc5ef7b650944ca43dce359928a90d 100644
--- a/llama-index-packs/llama-index-packs-retry-engine-weaviate/README.md
+++ b/llama-index-packs/llama-index-packs-retry-engine-weaviate/README.md
@@ -31,7 +31,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 
 vector_store_info = VectorStoreInfo(
     content_info="brief biography of celebrities",
diff --git a/llama-index-packs/llama-index-packs-self-discover/README.md b/llama-index-packs/llama-index-packs-self-discover/README.md
index b7b923ca172385d4fc1b99a102a2186f94eb4768..51ce36fc26b2da7d639bcd9fc3d0edf1ed5172c3 100644
--- a/llama-index-packs/llama-index-packs-self-discover/README.md
+++ b/llama-index-packs/llama-index-packs-self-discover/README.md
@@ -36,7 +36,7 @@ There are two ways using LlamaPack:
 ### Using `download_llama_pack`
 
 ```python
-from llama_index.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 SelfDiscoverPack = download_llama_pack(
diff --git a/llama-index-packs/llama-index-packs-self-rag/README.md b/llama-index-packs/llama-index-packs-self-rag/README.md
index 56bf4cec94b544cff000fb51711b68d8708b83ea..5caf1f240c704c905fd423ab41549b45909c5963 100644
--- a/llama-index-packs/llama-index-packs-self-rag/README.md
+++ b/llama-index-packs/llama-index-packs-self-rag/README.md
@@ -28,7 +28,7 @@ huggingface-cli download m4r1/selfrag_llama2_7b-GGUF selfrag_llama2_7b.q4_k_m.gg
 ```
 
 ```python
-from llama_index.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 SelfRAGPack = download_llama_pack("SelfRAGPack", "./self_rag_pack")
diff --git a/llama-index-packs/llama-index-packs-sub-question-weaviate/README.md b/llama-index-packs/llama-index-packs-sub-question-weaviate/README.md
index 0ef0cdfb83171fdce9d1af6da059a2a414f2d9b3..5d258b12f1ad71e935af1b893d95634641ccdcb3 100644
--- a/llama-index-packs/llama-index-packs-sub-question-weaviate/README.md
+++ b/llama-index-packs/llama-index-packs-sub-question-weaviate/README.md
@@ -31,7 +31,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 
 vector_store_info = VectorStoreInfo(
     content_info="brief biography of celebrities",
diff --git a/llama-index-packs/llama-index-packs-timescale-vector-autoretrieval/README.md b/llama-index-packs/llama-index-packs-timescale-vector-autoretrieval/README.md
index a9204d506b64268aec2aafd2454c27809d382472..56a07d28ee741d92e1f01e6f1e3eed066a405b32 100644
--- a/llama-index-packs/llama-index-packs-timescale-vector-autoretrieval/README.md
+++ b/llama-index-packs/llama-index-packs-timescale-vector-autoretrieval/README.md
@@ -51,7 +51,7 @@ You can then inspect the files at `./tsv_pack` and use them as a template for yo
-You can download the pack to a the `./tsv_pack` directory:
+You can download the pack to the `./tsv_pack` directory:
 
 ```python
-from llama_hub.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 TimescaleVectorAutoretrievalPack = download_llama_pack(
@@ -65,7 +65,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 from timescale_vector import client
 from dotenv import load_dotenv, find_dotenv
 import os
diff --git a/llama-index-packs/llama-index-packs-vanna/README.md b/llama-index-packs/llama-index-packs-vanna/README.md
index d4b42ec9cc5d3a7a48521b522e5a33677c016b2f..42fde06f552d093f3b123cee117c07a70a6c5cca 100644
--- a/llama-index-packs/llama-index-packs-vanna/README.md
+++ b/llama-index-packs/llama-index-packs-vanna/README.md
@@ -24,7 +24,7 @@ You can then inspect the files at `./vanna_pack` and use them as a template for
 You can download the pack to a `./vanna_pack` directory:
 
 ```python
-from llama_index.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 VannaPack = download_llama_pack("VannaPack", "./vanna_pack")