From 162fce0c60e1d542a4355bff9053318ec7a2bc6e Mon Sep 17 00:00:00 2001
From: Logan <logan.markewich@live.com>
Date: Mon, 4 Mar 2024 21:11:25 -0600
Subject: [PATCH] update readmes (#11627)

---
 .../tools/tool_spec/load_and_search/README.md |  6 +++-
 .../core/vector_stores/__init__.py            |  5 ++-
 llama-index-integrations/readers/README.md    |  9 ++---
 .../README.md                                 |  8 +++--
 .../llama-index-readers-airbyte-cdk/README.md |  9 +++--
 .../README.md                                 | 11 +++---
 .../README.md                                 | 11 +++---
 .../README.md                                 | 11 +++---
 .../README.md                                 | 11 +++---
 .../README.md                                 | 11 +++---
 .../README.md                                 | 11 +++---
 .../README.md                                 | 13 +++----
 .../llama-index-readers-airtable/README.md    |  7 ++--
 .../llama-index-readers-apify/README.md       | 14 ++++----
 .../llama-index-readers-arango-db/README.md   |  9 +++--
 .../llama-index-readers-asana/README.md       |  7 ++--
 .../llama-index-readers-assemblyai/README.md  | 34 +++----------------
 .../llama-index-readers-astra-db/README.md    |  8 +++--
 .../llama-index-readers-athena/README.md      | 14 +++++---
 .../README.md                                 | 12 ++++---
 .../README.md                                 | 15 ++++----
 .../llama-index-readers-bilibili/README.md    |  7 ++--
 .../llama-index-readers-bitbucket/README.md   |  8 +++--
 .../llama-index-readers-boarddocs/README.md   |  8 +++--
 .../llama-index-readers-confluence/README.md  | 10 ++++--
 .../llama-index-readers-couchbase/README.md   |  7 ++--
 .../llama-index-readers-couchdb/README.md     |  7 ++--
 .../llama-index-readers-dad-jokes/README.md   |  8 +++--
 .../llama-index-readers-discord/README.md     |  7 ++--
 .../llama-index-readers-docugami/README.md    |  8 +++--
 .../README.md                                 | 16 ++++-----
 .../llama-index-readers-feedly-rss/README.md  |  8 +++--
 .../llama-index-readers-feishu-docs/README.md |  9 +++--
 .../llama-index-readers-feishu-wiki/README.md |  2 --
 .../readers/file/image_deplot/README.md       |  6 +++-
 .../readers/file/paged_csv/README.md          |  7 ++--
 .../readers/file/pymu_pdf/README.md           |  9 +++--
 .../readers/file/unstructured/README.md       | 11 +++---
 .../llama_index/readers/file/xml/README.md    |  9 +++--
 .../README.md                                 | 12 ++++---
 .../llama-index-readers-firestore/README.md   |  7 ++--
 .../llama-index-readers-genius/README.md      | 10 +++---
 .../llama-index-readers-gpt-repo/README.md    |  8 +++--
 .../README.md                                 |  7 ++--
 .../llama-index-readers-graphql/README.md     |  7 ++--
 .../llama-index-readers-guru/README.md        |  8 +++--
 .../llama-index-readers-hatena-blog/README.md |  7 ++--
 .../llama-index-readers-hive/README.md        | 10 +++---
 .../llama-index-readers-hubspot/README.md     |  7 ++--
 .../README.md                                 |  7 ++--
 .../readers/llama-index-readers-hwp/README.md |  6 +++-
 .../llama-index-readers-imdb-review/README.md | 17 +++++-----
 .../llama-index-readers-intercom/README.md    |  8 +++--
 .../llama-index-readers-jira/README.md        | 10 +++---
 .../llama-index-readers-joplin/README.md      |  8 +++--
 .../llama-index-readers-kaltura/README.md     |  8 +++--
 .../llama-index-readers-kibela/README.md      |  9 +++--
 .../llama-index-readers-lilac/README.md       | 13 ++++---
 .../llama-index-readers-linear/README.md      | 10 +++---
 .../README.md                                 |  8 +++--
 .../llama-index-readers-mangadex/README.md    |  8 +++--
 .../README.md                                 |  8 +++--
 .../llama-index-readers-maps/README.md        | 12 +++----
 .../llama-index-readers-memos/README.md       |  7 ++--
 .../README.md                                 | 12 +++----
 .../README.md                                 | 12 ++++---
 .../README.md                                 |  8 +++--
 .../llama-index-readers-minio/README.md       |  2 +-
 .../llama_index/README.md                     |  4 ---
 .../README.md                                 |  8 +++--
 .../llama-index-readers-nougat-ocr/README.md  |  6 +++-
 .../llama-index-readers-openalex/README.md    |  6 +++-
 .../llama-index-readers-opendal/README.md     | 20 ++++-------
 .../llama-index-readers-opensearch/README.md  |  8 +++--
 .../llama-index-readers-pandas-ai/README.md   |  7 ++--
 .../llama-index-readers-papers/README.md      | 16 ++++-----
 .../llama-index-readers-patentsview/README.md |  7 ++--
 .../readers/llama-index-readers-pdb/README.md |  6 +++-
 .../llama-index-readers-pdf-table/README.md   |  6 +++-
 .../llama-index-readers-preprocess/README.md  | 14 ++++----
 .../llama-index-readers-rayyan/README.md      |  7 ++--
 .../llama-index-readers-readwise/README.md    | 14 +++++---
 .../llama-index-readers-reddit/README.md      | 12 ++++---
 .../README.md                                 |  8 +++--
 .../llama-index-readers-remote/README.md      |  8 +++--
 .../readers/llama-index-readers-s3/README.md  |  4 ---
 .../llama-index-readers-sec-filings/README.md | 17 +++++-----
 .../README.md                                 | 17 ++++++----
 .../llama-index-readers-singlestore/README.md |  6 +++-
 .../README.md                                 |  8 +++--
 .../llama-index-readers-snowflake/README.md   | 12 +++----
 .../README.md                                 |  7 ++--
 .../llama-index-readers-spotify/README.md     | 12 ++++---
 .../llama-index-readers-stripe-docs/README.md |  8 +++--
 .../llama-index-readers-telegram/README.md    |  7 ++--
 .../llama-index-readers-trello/README.md      |  7 ++--
 .../llama-index-readers-weather/README.md     |  8 +++--
 .../readers/web/async_web/README.md           |  6 +++-
 .../readers/web/beautiful_soup_web/README.md  | 16 +++++----
 .../readers/web/knowledge_base/README.md      | 16 +++++----
 .../web/main_content_extractor/README.md      | 16 +++++----
 .../llama_index/readers/web/news/README.md    |  6 +++-
 .../readers/web/readability_web/README.md     | 16 ++++-----
 .../llama_index/readers/web/rss/README.md     |  8 +++--
 .../readers/web/rss_news/README.md            |  2 +-
 .../readers/web/simple_web/README.md          | 16 +++++----
 .../llama_index/readers/web/sitemap/README.md |  6 +++-
 .../readers/web/trafilatura_web/README.md     | 16 +++++----
 .../readers/web/unstructured_web/README.md    |  8 +++--
 .../readers/web/whole_site/README.md          | 15 ++++----
 .../llama-index-readers-whatsapp/README.md    |  7 ++--
 .../llama-index-readers-wordlift/README.md    | 10 ++++--
 .../llama-index-readers-wordpress/README.md   |  8 +++--
 .../README.md                                 | 12 +++++--
 .../llama-index-readers-zendesk/README.md     |  8 +++--
 .../readers/llama-index-readers-zep/README.md | 11 +++---
 .../tools/llama-index-tools-arxiv/README.md   |  2 +-
 .../llama-index-tools-azure-cv/README.md      |  2 +-
 .../llama-index-tools-azure-speech/README.md  |  2 +-
 .../README.md                                 |  2 +-
 .../llama-index-tools-bing-search/README.md   |  2 +-
 .../README.md                                 |  2 +-
 .../README.md                                 |  2 +-
 .../llama-index-tools-cogniswitch/README.md   |  2 +-
 .../llama-index-tools-database/README.md      |  2 +-
 .../llama-index-tools-duckduckgo/README.md    |  2 +-
 .../tools/llama-index-tools-exa/README.md     |  4 +--
 .../tools/llama-index-tools-graphql/README.md |  2 +-
 .../README.md                                 | 12 +++++--
 .../llama-index-tools-metaphor/README.md      |  2 +-
 .../tools/llama-index-tools-multion/README.md |  8 +++--
 .../tools/llama-index-tools-neo4j/README.md   | 10 ++++--
 .../tools/llama-index-tools-notion/README.md  |  2 +-
 .../tools/llama-index-tools-openai/README.md  |  8 ++---
 .../tools/llama-index-tools-openapi/README.md |  8 +++--
 .../llama-index-tools-playgrounds/README.md   | 16 ++++-----
 .../llama-index-tools-python-file/README.md   |  2 +-
 .../llama-index-tools-requests/README.md      |  2 +-
 .../tools/llama-index-tools-shopify/README.md | 10 ++++--
 .../tools/llama-index-tools-slack/README.md   |  2 +-
 .../README.md                                 |  2 +-
 .../llama-index-tools-text-to-image/README.md |  2 +-
 .../llama-index-tools-vector-db/README.md     |  6 ++--
 .../tools/llama-index-tools-waii/README.md    |  4 +--
 .../tools/llama-index-tools-weather/README.md |  2 +-
 .../llama-index-tools-wikipedia/README.md     |  2 +-
 .../llama-index-tools-wolfram-alpha/README.md |  2 +-
 .../llama-index-tools-yahoo-finance/README.md |  2 +-
 .../tools/llama-index-tools-zapier/README.md  |  2 +-
 .../README.md                                 |  2 +-
 .../README.md                                 |  2 +-
 .../README.md                                 |  4 +--
 .../README.md                                 |  4 +--
 .../README.md                                 |  2 +-
 .../README.md                                 |  4 +--
 .../README.md                                 |  2 +-
 .../README.md                                 |  2 +-
 .../README.md                                 |  2 +-
 .../README.md                                 |  2 +-
 .../README.md                                 | 10 ++++--
 .../README.md                                 | 10 ++++--
 .../llama-index-packs-rag-cli-local/README.md |  2 +-
 .../llama-index-packs-rag-evaluator/README.md |  2 +-
 .../README.md                                 |  4 +--
 .../README.md                                 |  2 +-
 .../llama-index-packs-self-discover/README.md |  2 +-
 .../llama-index-packs-self-rag/README.md      |  2 +-
 .../README.md                                 |  2 +-
 .../README.md                                 |  4 +--
 .../llama-index-packs-vanna/README.md         |  2 +-
 170 files changed, 779 insertions(+), 533 deletions(-)

diff --git a/llama-index-core/llama_index/core/tools/tool_spec/load_and_search/README.md b/llama-index-core/llama_index/core/tools/tool_spec/load_and_search/README.md
index fad3c0db7b..53f2ce8d9e 100644
--- a/llama-index-core/llama_index/core/tools/tool_spec/load_and_search/README.md
+++ b/llama-index-core/llama_index/core/tools/tool_spec/load_and_search/README.md
@@ -1,5 +1,9 @@
 # LoadAndSearch Tool
 
+```bash
+pip install llama-index-tools-wikipedia
+```
+
 This Tool Spec is intended to wrap other tools, allowing the Agent to perform separate loading and reading of data. This is very useful for when tools return information larger than or closer to the size of the context window.
 
 ## Usage
@@ -11,7 +15,7 @@ from llama_index.core.tools.tool_spec.load_and_search import (
     LoadAndSearchToolSpec,
 )
 from llama_index.core.agent import OpenAIAgent
-from llama_hub.tools.wikipedia.base import WikipediaToolSpec
+from llama_index.tools.wikipedia.base import WikipediaToolSpec
 
 wiki_spec = WikipediaToolSpec()
 
diff --git a/llama-index-core/llama_index/core/vector_stores/__init__.py b/llama-index-core/llama_index/core/vector_stores/__init__.py
index c6ce1525be..713c7f859a 100644
--- a/llama-index-core/llama_index/core/vector_stores/__init__.py
+++ b/llama-index-core/llama_index/core/vector_stores/__init__.py
@@ -1,6 +1,5 @@
 """Vector stores."""
 
-
 from llama_index.core.vector_stores.simple import SimpleVectorStore
 from llama_index.core.vector_stores.types import (
     ExactMatchFilter,
@@ -8,8 +7,10 @@ from llama_index.core.vector_stores.types import (
     FilterOperator,
     MetadataFilter,
     MetadataFilters,
+    MetadataInfo,
     VectorStoreQuery,
     VectorStoreQueryResult,
+    VectorStoreInfo,
 )
 
 __all__ = [
@@ -17,8 +18,10 @@ __all__ = [
     "VectorStoreQueryResult",
     "MetadataFilters",
     "MetadataFilter",
+    "MetadataInfo",
     "ExactMatchFilter",
     "FilterCondition",
     "FilterOperator",
     "SimpleVectorStore",
+    "VectorStoreInfo",
 ]
diff --git a/llama-index-integrations/readers/README.md b/llama-index-integrations/readers/README.md
index 28086dbb4d..cd6aa2c93d 100644
--- a/llama-index-integrations/readers/README.md
+++ b/llama-index-integrations/readers/README.md
@@ -1,15 +1,16 @@
 # Readers (Loaders)
 
-## Reader Usage (Use `download_loader` from LlamaIndex)
+Readers can be installed directly as packages:
 
-You can also use the loaders with `download_loader` from LlamaIndex in a single line of code.
+```bash
+pip install llama-index-readers-google
+```
 
 For example, see the code snippets below using the Google Docs Loader.
 
 ```python
 from llama_index.core import VectorStoreIndex, download_loader
-
-GoogleDocsReader = download_loader("GoogleDocsReader")
+from llama_index.readers.google import GoogleDocsReader
 
 gdoc_ids = ["1wf-y2pd9C878Oh-FmLH7Q_BQkljdm6TQal-c1pUfrec"]
 loader = GoogleDocsReader()
diff --git a/llama-index-integrations/readers/llama-index-readers-agent-search/README.md b/llama-index-integrations/readers/llama-index-readers-agent-search/README.md
index 3d7f9ad282..d67611bcea 100644
--- a/llama-index-integrations/readers/llama-index-readers-agent-search/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-agent-search/README.md
@@ -1,5 +1,9 @@
 # AgentSearch Loader
 
+```bash
+pip install llama-index-readers-agent-search
+```
+
 This framework facilitates seamless integration with the AgentSearch dataset or hosted search APIs (e.g. Search Engines) and with RAG-specialized LLM's (e.g. Search Agents).
 
 During query-time, the user passes in the query string, search provider (`bing`, `agent-search`), and RAG provider model (`SciPhi/Sensei-7B-V1`).
@@ -15,9 +19,7 @@ Here's an example usage of the AgentSearchReader.
 # import os
 # os.environ["SCIPHI_API_KEY"] = "..."
 
-from llama_index import download_loader
-
-AgentSearch = download_loader("AgentSearchReader")
+from llama_index.readers.agent_search import AgentSearchReader
 
 reader = AgentSearch()
 
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-cdk/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-cdk/README.md
index 534cdab3b2..bf621cf2a4 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-cdk/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-cdk/README.md
@@ -1,10 +1,14 @@
 # Airbyte CDK Loader
 
+```bash
+pip install llama-index-readers-airbyte-cdk
+```
+
 The Airbyte CDK Loader is a shim for sources created using the [Airbyte Python CDK](https://docs.airbyte.com/connector-development/cdk-python/). It allows you to load data from any Airbyte source into LlamaIndex.
 
 ## Installation
 
-- Install llama_hub: `pip install llama_hub`
+- Install llama-index reader: `pip install llama-index-readers-airbyte-cdk`
 - Install airbyte-cdk: `pip install airbyte-cdk`
 - Install a source via git (or implement your own): `pip install git+https://github.com/airbytehq/airbyte.git@master#egg=source_github&subdirectory=airbyte-integrations/connectors/source-github`
 
@@ -15,8 +19,7 @@ Implement and import your own source. You can find lots of resources for how to
 Here's an example usage of the AirbyteCdkReader.
 
 ```python
-from llama_index import download_loader
-from llama_hub.airbyte_cdk import AirbyteCDKReader
+from llama_index.readers.airbyte_cdk import AirbyteCDKReader
 from source_github.source import (
     SourceGithub,
 )  # this is just an example, you can use any source here - this one is loaded from the Airbyte Github repo via pip install git+https://github.com/airbytehq/airbyte.git@master#egg=source_github&subdirectory=airbyte-integrations/connectors/source-github`
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-gong/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-gong/README.md
index 94a7c3f130..20b46ee18e 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-gong/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-gong/README.md
@@ -1,18 +1,17 @@
 # Airbyte Gong Loader
 
-The Airbyte Gong Loader allows you to access different Gong objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-gong
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the gong source: `pip install airbyte-source-gong`
+The Airbyte Gong Loader allows you to access different Gong objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteGongReader.
 
 ```python
-from llama_hub.airbyte_gong import AirbyteGongReader
+from llama_index.readers.airbyte_gong import AirbyteGongReader
 
 gong_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-hubspot/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-hubspot/README.md
index 34fca9a731..29eef93781 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-hubspot/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-hubspot/README.md
@@ -1,18 +1,17 @@
 # Airbyte Hubspot Loader
 
-The Airbyte Hubspot Loader allows you to access different Hubspot objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-hubspot
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the hubspot source: `pip install airbyte-source-hubspot`
+The Airbyte Hubspot Loader allows you to access different Hubspot objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteHubspotReader.
 
 ```python
-from llama_hub.airbyte_hubspot import AirbyteHubspotReader
+from llama_index.readers.airbyte_hubspot import AirbyteHubspotReader
 
 hubspot_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-salesforce/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-salesforce/README.md
index 375ea08282..7b03d4492a 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-salesforce/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-salesforce/README.md
@@ -1,18 +1,17 @@
 # Airbyte Salesforce Loader
 
-The Airbyte Salesforce Loader allows you to access different Salesforce objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-salesforce
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the salesforce source: `pip install airbyte-source-salesforce`
+The Airbyte Salesforce Loader allows you to access different Salesforce objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteSalesforceReader.
 
 ```python
-from llama_hub.airbyte_salesforce import AirbyteSalesforceReader
+from llama_index.readers.airbyte_salesforce import AirbyteSalesforceReader
 
 salesforce_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-shopify/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-shopify/README.md
index 8802120283..c2c250b552 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-shopify/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-shopify/README.md
@@ -1,18 +1,17 @@
 # Airbyte Shopify Loader
 
-The Airbyte Shopify Loader allows you to access different Shopify objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-shopify
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the shopify source: `pip install airbyte-source-shopify`
+The Airbyte Shopify Loader allows you to access different Shopify objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteShopifyReader.
 
 ```python
-from llama_hub.airbyte_shopify import AirbyteShopifyReader
+from llama_index.readers.airbyte_shopify import AirbyteShopifyReader
 
 shopify_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-stripe/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-stripe/README.md
index 094255a574..96b9dfe3de 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-stripe/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-stripe/README.md
@@ -1,18 +1,17 @@
 # Airbyte Stripe Loader
 
-The Airbyte Stripe Loader allows you to access different Stripe objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-stripe
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the stripe source: `pip install airbyte-source-stripe`
+The Airbyte Stripe Loader allows you to access different Stripe objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteStripeReader.
 
 ```python
-from llama_hub.airbyte_stripe import AirbyteStripeReader
+from llama_index.readers.airbyte_stripe import AirbyteStripeReader
 
 stripe_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-typeform/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-typeform/README.md
index bb9338d260..a4f7ee9760 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-typeform/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-typeform/README.md
@@ -1,18 +1,17 @@
 # Airbyte Typeform Loader
 
-The Airbyte Typeform Loader allows you to access different Typeform objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-typeform
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the typeform source: `pip install airbyte-source-typeform`
+The Airbyte Typeform Loader allows you to access different Typeform objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteTypeformReader.
 
 ```python
-from llama_hub.airbyte_typeform import AirbyteTypeformReader
+from llama_index.readers.airbyte_typeform import AirbyteTypeformReader
 
 typeform_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airbyte-zendesk-support/README.md b/llama-index-integrations/readers/llama-index-readers-airbyte-zendesk-support/README.md
index 8e9359053e..72aa30eb1c 100644
--- a/llama-index-integrations/readers/llama-index-readers-airbyte-zendesk-support/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airbyte-zendesk-support/README.md
@@ -1,18 +1,19 @@
 # Airbyte ZendeskSupport Loader
 
-The Airbyte ZendeskSupport Loader allows you to access different ZendeskSupport objects.
-
-## Installation
+```bash
+pip install llama-index-readers-airbyte-zendesk-support
+```
 
-- Install llama_hub: `pip install llama_hub`
-- Install the zendesk_support source: `pip install airbyte-source-zendesk-support`
+The Airbyte ZendeskSupport Loader allows you to access different ZendeskSupport objects.
 
 ## Usage
 
 Here's an example usage of the AirbyteZendeskSupportReader.
 
 ```python
-from llama_hub.airbyte_zendesk_support import AirbyteZendeskSupportReader
+from llama_index.readers.airbyte_zendesk_support import (
+    AirbyteZendeskSupportReader,
+)
 
 zendesk_support_config = {
     # ...
diff --git a/llama-index-integrations/readers/llama-index-readers-airtable/README.md b/llama-index-integrations/readers/llama-index-readers-airtable/README.md
index 64197e51db..ab47b7c07d 100644
--- a/llama-index-integrations/readers/llama-index-readers-airtable/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-airtable/README.md
@@ -1,5 +1,9 @@
 # Airtable Loader
 
+```bash
+pip install llama-index-readers-airtable
+```
+
 This loader loads documents from Airtable. The user specifies an API token to initialize the AirtableReader. They then specify a `table_id` and a `base_id` to load in the corresponding Document objects.
 
 ## Usage
@@ -7,10 +11,9 @@ This loader loads documents from Airtable. The user specifies an API token to in
 Here's an example usage of the AirtableReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-AirtableReader = download_loader("AirtableReader")
+from llama_index.readers.airtable import AirtableReader
 
 reader = AirtableReader("<Airtable_TOKEN>")
 documents = reader.load_data(table_id="<TABLE_ID>", base_id="<BASE_ID>")
diff --git a/llama-index-integrations/readers/llama-index-readers-apify/README.md b/llama-index-integrations/readers/llama-index-readers-apify/README.md
index e752e540db..a52ae9787d 100644
--- a/llama-index-integrations/readers/llama-index-readers-apify/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-apify/README.md
@@ -1,5 +1,9 @@
 # Apify Loaders
 
+```bash
+pip install llama-index-readers-apify
+```
+
 ## Apify Actor Loader
 
 [Apify](https://apify.com/) is a cloud platform for web scraping and data extraction,
@@ -20,8 +24,7 @@ To use this loader, you need to have a (free) Apify account
 and set your [Apify API token](https://console.apify.com/account/integrations) in the code.
 
 ```python
-from llama_index import download_loader
-from llama_index.readers.schema import Document
+from llama_index.core import Document
 
 
 # Converts a single record from the Actor's resulting dataset to the LlamaIndex format
@@ -34,7 +37,7 @@ def tranform_dataset_item(item):
     )
 
 
-ApifyActor = download_loader("ApifyActor")
+from llama_index.readers.apify import ApifyActor
 
 reader = ApifyActor("<My Apify API token>")
 documents = reader.load_data(
@@ -72,8 +75,7 @@ To use this loader, you need to have a (free) Apify account
 and set your [Apify API token](https://console.apify.com/account/integrations) in the code.
 
 ```python
-from llama_index import download_loader
-from llama_index.readers.schema import Document
+from llama_index.core import Document
 
 
 # Converts a single record from the Apify dataset to the LlamaIndex format
@@ -86,7 +88,7 @@ def tranform_dataset_item(item):
     )
 
 
-ApifyDataset = download_loader("ApifyDataset")
+from llama_index.readers.apify import ApifyDataset
 
 reader = ApifyDataset("<Your Apify API token>")
 documents = reader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-arango-db/README.md b/llama-index-integrations/readers/llama-index-readers-arango-db/README.md
index e31489ee1e..d8fef88eb1 100644
--- a/llama-index-integrations/readers/llama-index-readers-arango-db/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-arango-db/README.md
@@ -1,5 +1,9 @@
 # LlamaIndex Readers Integration: Arango Db
 
+```bash
+pip install llama-index-readers-arango-db
+```
+
 This loader loads documents from ArangoDB. The user specifies a ArangoDB instance to
 initialize the reader. They then specify the collection name and query params to
 fetch the relevant docs.
@@ -9,10 +13,9 @@ fetch the relevant docs.
 Here's an example usage of the SimpleArangoDBReader.
 
 ```python
-from llama_index.core.readers import download_loader
 import os
 
-SimpleArangoDBReader = download_loader("SimpleArangoDBReader")
+from llama_index.readers.arango_db import SimpleArangoDBReader
 
 host = "<host>"
 db_name = "<db_name>"
@@ -32,4 +35,4 @@ documents = reader.load_data(
 )
 ```
 
-This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent. See [here](https://github.com/run-llama/llama-hub/tree/main/llama_hub) for examples.
+This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
diff --git a/llama-index-integrations/readers/llama-index-readers-asana/README.md b/llama-index-integrations/readers/llama-index-readers-asana/README.md
index 2bd439c33e..cbfe43f829 100644
--- a/llama-index-integrations/readers/llama-index-readers-asana/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-asana/README.md
@@ -1,5 +1,9 @@
 # Asana Loader
 
+```bash
+pip install llama-index-readers-asana
+```
+
 This loader loads documents from Asana. The user specifies an API token to initialize the AsanaReader. They then specify a `workspace_id` OR a `project_id` to load in the corresponding Document objects.
 
 ## Usage
@@ -7,10 +11,9 @@ This loader loads documents from Asana. The user specifies an API token to initi
 Here's an example usage of the AsanaReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-AsanaReader = download_loader("AsanaReader")
+from llama_index.readers.asana import AsanaReader
 
 reader = AsanaReader("<ASANA_TOKEN>")
 
diff --git a/llama-index-integrations/readers/llama-index-readers-assemblyai/README.md b/llama-index-integrations/readers/llama-index-readers-assemblyai/README.md
index e0e7d14cfe..36fdd1c658 100644
--- a/llama-index-integrations/readers/llama-index-readers-assemblyai/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-assemblyai/README.md
@@ -1,5 +1,9 @@
 # AssemblyAI Audio Transcript Loader
 
+```bash
+pip install llama-index-readers-assemblyai
+```
+
 The AssemblyAI Audio Transcript Loader allows to transcribe audio files with the [AssemblyAI API](https://www.assemblyai.com/) and loads the transcribed text into documents.
 
 To use it, you should have the `assemblyai` python package installed, and the environment variable `ASSEMBLYAI_API_KEY` set with your API key. Alternatively, the API key can also be passed as an argument.
@@ -10,40 +14,12 @@ More info about AssemblyAI:
 - [Get a Free API key](https://www.assemblyai.com/dashboard/signup)
 - [AssemblyAI API Docs](https://www.assemblyai.com/docs)
 
-## Installation
-
-First, you need to install the `assemblyai` python package.
-
-You can find more info about it inside the [assemblyai-python-sdk GitHub repo](https://github.com/AssemblyAI/assemblyai-python-sdk).
-
-```bash
-pip install assemblyai
-```
-
-Optionally: You can install the AssemblyAI integration yourself with:
-
-```bash
-pip install llama-index-readers-assemblyai
-```
-
-Then you can import it with:
-
-```python
-from llama_index.readers.assemblyai import AssemblyAIAudioTranscriptReader
-```
-
-As an alternative, you can also use the `download_loader()` to install and use this integration (see next section).
-
 ## Usage
 
 The `AssemblyAIAudioTranscriptReader` needs at least the `file_path` argument. Audio files can be specified as an URL or a local file path.
 
 ```python
-from llama_index.core import download_loader
-
-AssemblyAIAudioTranscriptReader = download_loader(
-    "AssemblyAIAudioTranscriptReader"
-)
+from llama_index.readers.assemblyai import AssemblyAIAudioTranscriptReader
 
 audio_file = "https://storage.googleapis.com/aai-docs-samples/nbc.mp3"
 # or a local file path: audio_file = "./nbc.mp3"
diff --git a/llama-index-integrations/readers/llama-index-readers-astra-db/README.md b/llama-index-integrations/readers/llama-index-readers-astra-db/README.md
index bd7b4e7cde..eda6a64ad4 100644
--- a/llama-index-integrations/readers/llama-index-readers-astra-db/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-astra-db/README.md
@@ -1,5 +1,9 @@
 # Astra DB Loader
 
+```bash
+pip install llama-index-readers-astra-db
+```
+
 The Astra DB Loader returns a set of documents retrieved from Astra DB.
 The user initializes the loader with an Astra DB index. They then pass in a vector.
 
@@ -10,8 +14,6 @@ Here's an example usage of the AstraDBReader.
 ```python
 from openai import OpenAI
 
-from llama_index import download_loader
-
 
 # Get the credentials for Astra DB
 api_endpoint = "https://324<...>f1c.astra.datastax.com"
@@ -29,7 +31,7 @@ response = client.embeddings.create(
 query_vector = response.data[0].embedding
 
 # Initialize the Reader object
-AstraDBReader = download_loader("AstraDBReader")
+from llama_index.readers.astra_db import AstraDBReader
 
 # Your Astra DB Account will provide you with the endpoint URL and Token
 reader = AstraDBReader(
diff --git a/llama-index-integrations/readers/llama-index-readers-athena/README.md b/llama-index-integrations/readers/llama-index-readers-athena/README.md
index afff8d84f5..082e2ca83c 100644
--- a/llama-index-integrations/readers/llama-index-readers-athena/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-athena/README.md
@@ -1,5 +1,11 @@
 # Athena reader.
 
+```bash
+pip install llama-index-readers-athena
+
+pip install llama-index-llms-openai
+```
+
-Athena reader allow execute SQL with AWS Athena. We using SQLAlchemy and PyAthena under the hood.
+The Athena reader allows executing SQL with AWS Athena. We use SQLAlchemy and PyAthena under the hood.
 
 ## Permissions
@@ -13,10 +19,10 @@ Here's an example usage of the AthenaReader.
 ```
 import os
 import dotenv
-from llama_index import SQLDatabase,ServiceContext
-from llama_index.indices.struct_store import NLSQLTableQueryEngine
-from llama_index.llms import OpenAI
-from llama_hub.athena import AthenaReader
+from llama_index.core import SQLDatabase,ServiceContext
+from llama_index.core.query_engine import NLSQLTableQueryEngine
+from llama_index.llms.openai import OpenAI
+from llama_index.readers.athena import AthenaReader
 
 dotenv.load_dotenv()
 
diff --git a/llama-index-integrations/readers/llama-index-readers-azcognitive-search/README.md b/llama-index-integrations/readers/llama-index-readers-azcognitive-search/README.md
index 953bef038c..9891527c4a 100644
--- a/llama-index-integrations/readers/llama-index-readers-azcognitive-search/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-azcognitive-search/README.md
@@ -1,5 +1,9 @@
 # Azure Cognitive Search Loader
 
+```bash
+pip install llama-index-readers-azcognitive-search
+```
+
 The AzCognitiveSearchReader Loader returns a set of texts corresponding to documents retrieved from specific index of Azure Cognitive Search.
 The user initializes the loader with credentials (service name and key) and the index name.
 
@@ -8,9 +12,7 @@ The user initializes the loader with credentials (service name and key) and the
 Here's an example usage of the AzCognitiveSearchReader.
 
 ```python
-from llama_index import download_loader
-
-AzCognitiveSearchReader = download_loader("AzCognitiveSearchReader")
+from llama_index.readers.azcognitive_search import AzCognitiveSearchReader
 
 reader = AzCognitiveSearchReader(
     "<Azure_Cognitive_Search_NAME>",
@@ -30,11 +32,11 @@ documents = reader.load_data(
 ## Usage in combination with langchain
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
 from langchain.chains.conversation.memory import ConversationBufferMemory
 from langchain.agents import Tool, AgentExecutor, load_tools, initialize_agent
 
-AzCognitiveSearchReader = download_loader("AzCognitiveSearchReader")
+from llama_index.readers.azcognitive_search import AzCognitiveSearchReader
 
 az_loader = AzCognitiveSearchReader(
     COGNITIVE_SEARCH_SERVICE_NAME, COGNITIVE_SEARCH_KEY, INDEX_NAME
diff --git a/llama-index-integrations/readers/llama-index-readers-azstorage-blob/README.md b/llama-index-integrations/readers/llama-index-readers-azstorage-blob/README.md
index 306aec7836..e4c1adcbfb 100644
--- a/llama-index-integrations/readers/llama-index-readers-azstorage-blob/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-azstorage-blob/README.md
@@ -1,5 +1,9 @@
 # Azure Storage Blob Loader
 
+```bash
+pip install llama-index-readers-azstorage-blob
+```
+
-This loader parses any file stored as an Azure Storage blob or the entire container (with an optional prefix / attribute filter) if no particular file is specified. When initializing `AzStorageBlobReader`, you may pass in your account url with a SAS token or crdentials to authenticate.
+This loader parses any file stored as an Azure Storage blob or the entire container (with an optional prefix / attribute filter) if no particular file is specified. When initializing `AzStorageBlobReader`, you may pass in your account url with a SAS token or credentials to authenticate.
 
 All files are temporarily downloaded locally and subsequently parsed with `SimpleDirectoryReader`. Hence, you may also specify a custom `file_extractor`, relying on any of the loaders in this library (or your own)! If you need a clue on finding the file extractor object because you'd like to use your own file extractor, follow this sample.
@@ -20,9 +24,7 @@ To use this loader, you need to pass in the name of your Azure Storage Container
 ### Using a Storage Account SAS URL
 
 ```python
-from llama_index import download_loader
-
-AzStorageBlobReader = download_loader("AzStorageBlobReader")
+from llama_index.readers.azstorage_blob import AzStorageBlobReader
 
 loader = AzStorageBlobReader(
     container="scrabble-dictionary",
@@ -38,9 +40,7 @@ documents = loader.load_data()
 The sample below will download all files in a container, by only specifying the storage account's connection string and the container name.
 
 ```python
-from llama_index import download_loader
-
-AzStorageBlobReader = download_loader("AzStorageBlobReader")
+from llama_index.readers.azstorage_blob import AzStorageBlobReader
 
 loader = AzStorageBlobReader(
     container_name="<CONTAINER_NAME>",
@@ -57,12 +57,11 @@ Ensure the Azure Identity library is available `pip install azure-identity`
 The sample below downloads all files in the container using the default credential, alternative credential options are available such as a service principal `ClientSecretCredential`
 
 ```python
-from llama_index import download_loader
 from azure.identity import DefaultAzureCredential
 
 default_credential = DefaultAzureCredential()
 
-AzStorageBlobReader = download_loader("AzStorageBlobReader")
+from llama_index.readers.azstorage_blob import AzStorageBlobReader
 
 loader = AzStorageBlobReader(
     container_name="scrabble-dictionary",
diff --git a/llama-index-integrations/readers/llama-index-readers-bilibili/README.md b/llama-index-integrations/readers/llama-index-readers-bilibili/README.md
index 36bc3b949e..66e93880f7 100644
--- a/llama-index-integrations/readers/llama-index-readers-bilibili/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-bilibili/README.md
@@ -1,5 +1,9 @@
 # Bilibili Transcript Loader
 
+```bash
+pip install llama-index-readers-bilibili
+```
+
 This loader utilizes the `bilibili_api` to fetch the text transcript from Bilibili, one of the most beloved long-form video sites in China.
 
 With this BilibiliTranscriptReader, users can easily obtain the transcript of their desired video content on the platform.
@@ -9,9 +13,8 @@ With this BilibiliTranscriptReader, users can easily obtain the transcript of th
 To use this loader, you need to pass in an array of Bilibili video links.
 
 ```python
-from llama_index import download_loader
+from llama_index.readers.bilibili import BilibiliTranscriptReader
 
-BilibiliTranscriptReader = download_loader("BilibiliTranscriptReader")
 loader = BilibiliTranscriptReader()
 documents = loader.load_data(
     video_urls=["https://www.bilibili.com/video/BV1yx411L73B/"]
diff --git a/llama-index-integrations/readers/llama-index-readers-bitbucket/README.md b/llama-index-integrations/readers/llama-index-readers-bitbucket/README.md
index a5f4c50659..2edc08dade 100644
--- a/llama-index-integrations/readers/llama-index-readers-bitbucket/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-bitbucket/README.md
@@ -1,5 +1,9 @@
 # Bitbucket Loader
 
+```bash
+pip install llama-index-readers-bitbucket
+```
+
 This loader utilizes the Bitbucket API to load the files inside a Bitbucket repository as Documents in an index.
 
 ## Usage
@@ -8,7 +12,7 @@ To use this loader, you need to provide as environment variables the `BITBUCKET_
 
 ```python
 import os
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
 
 os.environ["BITBUCKET_USERNAME"] = "myusername"
 os.environ["BITBUCKET_API_KEY"] = "myapikey"
@@ -16,7 +20,7 @@ os.environ["BITBUCKET_API_KEY"] = "myapikey"
 base_url = "https://myserver/bitbucket"
 project_key = "mykey"
 
-BitbucketReader = download_loader("BitbucketReader")
+from llama_index.readers.bitbucket import BitbucketReader
 
 loader = BitbucketReader(
     base_url=base_url,
diff --git a/llama-index-integrations/readers/llama-index-readers-boarddocs/README.md b/llama-index-integrations/readers/llama-index-readers-boarddocs/README.md
index d10393b5c8..ce037d0cc1 100644
--- a/llama-index-integrations/readers/llama-index-readers-boarddocs/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-boarddocs/README.md
@@ -1,5 +1,9 @@
 # BoardDocs Loader
 
+```bash
+pip install llama-index-readers-boarddocs
+```
+
 This loader retrieves an agenda and associated material from a BoardDocs site.
 
 This loader is not endorsed by, developed by, supported by, or in any way formally affiliated with Diligent Corporation.
@@ -10,9 +14,7 @@ To use this loader, you'll need to specify which BoardDocs site you want to load
 as well as the committee on the site you want to scrape.
 
 ```python
-from llama_index import download_loader
-
-BoardDocsReader = download_loader("BoardDocsReader")
+from llama_index.readers.boarddocs import BoardDocsReader
 
 # For a site URL https://go.boarddocs.com/ca/redwood/Board.nsf/Public
 # your site should be set to 'ca/redwood'
diff --git a/llama-index-integrations/readers/llama-index-readers-confluence/README.md b/llama-index-integrations/readers/llama-index-readers-confluence/README.md
index e1cf202d4a..4b55e6c1d6 100644
--- a/llama-index-integrations/readers/llama-index-readers-confluence/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-confluence/README.md
@@ -1,5 +1,9 @@
 # Confluence Loader
 
+```bash
+pip install llama-index-readers-confluence
+```
+
 This loader loads pages from a given Confluence cloud instance. The user needs to specify the base URL for a Confluence
 instance to initialize the ConfluenceReader - base URL needs to end with `/wiki`. The user can optionally specify
 OAuth 2.0 credentials to authenticate with the Confluence instance. If no credentials are specified, the loader will
@@ -42,7 +46,7 @@ Here's an example usage of the ConfluenceReader.
 
 ```python
 # Example that reads the pages with the `page_ids`
-from llama_hub.confluence import ConfluenceReader
+from llama_index.readers.confluence import ConfluenceReader
 
 token = {"access_token": "<access_token>", "token_type": "<token_type>"}
 oauth2_dict = {"client_id": "<client_id>", "token": token}
@@ -65,7 +69,7 @@ documents.extend(
 
 ```python
 # Example that fetches the first 5, then the next 5 pages from a space
-from llama_hub.confluence import ConfluenceReader
+from llama_index.readers.confluence import ConfluenceReader
 
 token = {"access_token": "<access_token>", "token_type": "<token_type>"}
 oauth2_dict = {"client_id": "<client_id>", "token": token}
@@ -95,7 +99,7 @@ documents.extend(
 
 ```python
-# Example that fetches the first 5 results froma cql query, the uses the cursor to pick up on the next element
+# Example that fetches the first 5 results from a cql query, then uses the cursor to pick up on the next element
-from llama_hub.confluence import ConfluenceReader
+from llama_index.readers.confluence import ConfluenceReader
 
 token = {"access_token": "<access_token>", "token_type": "<token_type>"}
 oauth2_dict = {"client_id": "<client_id>", "token": token}
diff --git a/llama-index-integrations/readers/llama-index-readers-couchbase/README.md b/llama-index-integrations/readers/llama-index-readers-couchbase/README.md
index 574ba00b2a..f30013bf7a 100644
--- a/llama-index-integrations/readers/llama-index-readers-couchbase/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-couchbase/README.md
@@ -1,5 +1,9 @@
 # LlamaIndex Readers Integration: Couchbase
 
+```bash
+pip install llama-index-readers-couchbase
+```
+
 This loader loads documents from Couchbase cluster.
 The user specifies a Couchbase client or credentials to initialize the reader. They can specify the SQL++ query to
 fetch the relevant docs.
@@ -9,10 +13,9 @@ fetch the relevant docs.
 Here's an example usage of the CouchbaseReader.
 
 ```python
-from llama_index.core.readers import download_loader
 import os
 
-CouchbaseLoader = download_loader("CouchbaseReader")
+from llama_index.readers.couchbase import CouchbaseReader
 
 connection_string = (
     "couchbase://localhost"  # valid Couchbase connection string
diff --git a/llama-index-integrations/readers/llama-index-readers-couchdb/README.md b/llama-index-integrations/readers/llama-index-readers-couchdb/README.md
index d738eb39b9..cd3b51286c 100644
--- a/llama-index-integrations/readers/llama-index-readers-couchdb/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-couchdb/README.md
@@ -1,5 +1,9 @@
 # CouchDB Loader
 
+```bash
+pip install llama-index-readers-couchdb
+```
+
 This loader loads documents from CouchDB. The loader currently supports CouchDB 3.x
 using the CouchDB3 python wrapper from https://github.com/n-vlahovic/couchdb3
 The user specifies a CouchDB instance to initialize the reader. They then specify
@@ -10,10 +14,9 @@ the database name and query params to fetch the relevant docs.
 Here's an example usage of the SimpleCouchDBReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-SimpleCouchDBReader = download_loader("SimpleCouchDBReader")
+from llama_index.readers.couchdb import SimpleCouchDBReader
 
 host = "<host>"
 port = "<port>"
diff --git a/llama-index-integrations/readers/llama-index-readers-dad-jokes/README.md b/llama-index-integrations/readers/llama-index-readers-dad-jokes/README.md
index f786ca3b51..6e07eeffcb 100644
--- a/llama-index-integrations/readers/llama-index-readers-dad-jokes/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-dad-jokes/README.md
@@ -1,5 +1,9 @@
 # DadJoke Loader
 
+```bash
+pip install llama-index-readers-dad-jokes
+```
+
 This loader fetches a joke from icanhazdadjoke.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader fetches a joke from icanhazdadjoke.
 To use this loader, load it.
 
 ```python
-from llama_index import download_loader
-
-DadJokesReader = download_loader("DadJokesReader")
+from llama_index.readers.dad_jokes import DadJokesReader
 
 loader = DadJokesReader()
 documents = loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-discord/README.md b/llama-index-integrations/readers/llama-index-readers-discord/README.md
index 522c28c3a6..c925cf9539 100644
--- a/llama-index-integrations/readers/llama-index-readers-discord/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-discord/README.md
@@ -1,5 +1,9 @@
 # Discord Loader
 
+```bash
+pip install llama-index-readers-discord
+```
+
 This loader loads conversations from Discord. The user specifies `channel_ids` and we fetch conversations from
 those `channel_ids`.
 
@@ -8,10 +12,9 @@ those `channel_ids`.
 Here's an example usage of the DiscordReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-DiscordReader = download_loader("DiscordReader")
+from llama_index.readers.discord import DiscordReader
 
 discord_token = os.getenv("DISCORD_TOKEN")
 channel_ids = [1057178784895348746]  # Replace with your channel_id
diff --git a/llama-index-integrations/readers/llama-index-readers-docugami/README.md b/llama-index-integrations/readers/llama-index-readers-docugami/README.md
index 764a6b6038..e31a22c9ee 100644
--- a/llama-index-integrations/readers/llama-index-readers-docugami/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-docugami/README.md
@@ -1,5 +1,9 @@
 # Docugami Loader
 
+```bash
+pip install llama-index-readers-docugami
+```
+
 This loader takes in IDs of PDF, DOCX or DOC files processed by [Docugami](https://docugami.com) and returns nodes in a Document XML Knowledge Graph for each document. This is a rich representation that includes the semantic and structural characteristics of various chunks in the document as an XML tree. Entire sets of documents are processed, resulting in forests of XML semantic trees.
 
 ## Pre-requisites
@@ -14,9 +18,7 @@ This loader takes in IDs of PDF, DOCX or DOC files processed by [Docugami](https
 To use this loader, you simply need to pass in a Docugami Doc Set ID, and optionally an array of Document IDs (by default, all documents in the Doc Set are loaded).
 
 ```python
-from llama_index.core import download_loader
-
-DocugamiReader = download_loader("DocugamiReader")
+from llama_index.readers.docugami import DocugamiReader
 
 docset_id = "tjwrr2ekqkc3"
 document_ids = ["ui7pkriyckwi", "1be3o7ch10iy"]
diff --git a/llama-index-integrations/readers/llama-index-readers-earnings-call-transcript/README.md b/llama-index-integrations/readers/llama-index-readers-earnings-call-transcript/README.md
index 1eb7b9f247..bd5e23b64f 100644
--- a/llama-index-integrations/readers/llama-index-readers-earnings-call-transcript/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-earnings-call-transcript/README.md
@@ -1,5 +1,9 @@
 # EARNING CALL TRANSCRIPTS LOADER
 
+```bash
+pip install llama-index-readers-earnings-call-transcript
+```
+
 This loader fetches the earning call transcripts of US based companies from the website [discountingcashflows.com](https://discountingcashflows.com/). It is not available for commercial purposes
 
 Install the required dependencies
@@ -17,9 +21,7 @@ The Earning call transcripts takes in three arguments
 ## Usage
 
 ```python
-from llama_index import download_loader
-
-EarningsCallTranscript = download_loader("EarningsCallTranscript")
+from llama_index.readers.earnings_call_transcript import EarningsCallTranscript
 
 loader = EarningsCallTranscript(2023, "AAPL", "Q3")
 docs = loader.load_data()
@@ -37,10 +39,9 @@ The metadata of the transcripts are the following
 #### Llama Index
 
 ```python
-from llama_index import download_loader
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
 
-EarningsCallTranscript = download_loader("EarningsCallTranscript")
+from llama_index.readers.earnings_call_transcript import EarningsCallTranscript
 
 loader = EarningsCallTranscript(2023, "AAPL", "Q3")
 docs = loader.load_data()
@@ -57,13 +58,12 @@ print(response)
 #### Langchain
 
 ```python
-from llama_index import download_loader
 from langchain.agents import Tool
 from langchain.agents import initialize_agent
 from langchain.chat_models import ChatOpenAI
 from langchain.llms import OpenAI
 
-EarningsCallTranscript = download_loader("EarningsCallTranscript")
+from llama_index.readers.earnings_call_transcript import EarningsCallTranscript
 
 loader = EarningsCallTranscript(2023, "AAPL", "Q3")
 docs = loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-feedly-rss/README.md b/llama-index-integrations/readers/llama-index-readers-feedly-rss/README.md
index fa201e53b9..3072a55fe6 100644
--- a/llama-index-integrations/readers/llama-index-readers-feedly-rss/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-feedly-rss/README.md
@@ -1,13 +1,15 @@
 # Feedly Loader
 
+```bash
+pip install llama-index-readers-feedly-rss
+```
+
 This loader fetches the entries from a list of RSS feeds subscribed in [Feedly](https://feedly.com). You must initialize the loader with your [Feedly API token](https://developer.feedly.com), and then pass the category name which you want to extract.
 
 ## Usage
 
 ```python
-from llama_index import download_loader
-
-feedlyRssReader = download_loader("FeedlyRssReader")
+from llama_index.readers.feedly_rss import FeedlyRssReader
 
-loader = feedlyRssReader(bearer_token="[YOUR_TOKEN]")
+loader = FeedlyRssReader(bearer_token="[YOUR_TOKEN]")
 documents = loader.load_data(category_name="news", max_count=100)
diff --git a/llama-index-integrations/readers/llama-index-readers-feishu-docs/README.md b/llama-index-integrations/readers/llama-index-readers-feishu-docs/README.md
index b28f90cfd6..2c378943de 100644
--- a/llama-index-integrations/readers/llama-index-readers-feishu-docs/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-feishu-docs/README.md
@@ -1,5 +1,9 @@
 # Feishu Doc Loader
 
+```bash
+pip install llama-index-readers-feishu-docs
+```
+
-This loader takes in IDs of Feishu Docs and parses their text into `documents`. You can extract a Feishu Doc's ID directly from its URL. For example, the ID of `https://test-csl481dfkgqf.feishu.cn/docx/HIH2dHv21ox9kVxjRuwc1W0jnkf` is `HIH2dHv21ox9kVxjRuwc1W0jnkf`. As a prerequisite, you will need to register with Feishu and build an custom app. See [here](https://open.feishu.cn/document/home/introduction-to-custom-app-development/self-built-application-development-process) for instructions.
+This loader takes in IDs of Feishu Docs and parses their text into `documents`. You can extract a Feishu Doc's ID directly from its URL. For example, the ID of `https://test-csl481dfkgqf.feishu.cn/docx/HIH2dHv21ox9kVxjRuwc1W0jnkf` is `HIH2dHv21ox9kVxjRuwc1W0jnkf`. As a prerequisite, you will need to register with Feishu and build a custom app. See [here](https://open.feishu.cn/document/home/introduction-to-custom-app-development/self-built-application-development-process) for instructions.
 
 ## Usage
@@ -7,12 +11,11 @@ This loader takes in IDs of Feishu Docs and parses their text into `documents`.
 To use this loader, you simply need to pass in an array of Feishu Doc IDs. The default API endpoints are for Feishu, in order to switch to Lark, we should use `set_lark_domain`.
 
 ```python
-from llama_index import download_loader
-
 app_id = "cli_slkdjalasdkjasd"
 app_secret = "dskLLdkasdjlasdKK"
 doc_ids = ["HIH2dHv21ox9kVxjRuwc1W0jnkf"]
-FeishuDocsReader = download_loader("FeishuDocsReader")
+from llama_index.readers.feishu_docs import FeishuDocsReader
+
 loader = FeishuDocsReader(app_id, app_secret)
 documents = loader.load_data(document_ids=doc_ids)
 ```
diff --git a/llama-index-integrations/readers/llama-index-readers-feishu-wiki/README.md b/llama-index-integrations/readers/llama-index-readers-feishu-wiki/README.md
index 5acccf3c8c..1f1b384ba6 100644
--- a/llama-index-integrations/readers/llama-index-readers-feishu-wiki/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-feishu-wiki/README.md
@@ -11,8 +11,6 @@ To use this loader, you need to:
 3. finally, pass your feishu space id to this loader
 
 ```python
-from llama_index import download_loader
-
 app_id = "xxx"
 app_secret = "xxx"
 space_id = "xxx"
diff --git a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/image_deplot/README.md b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/image_deplot/README.md
index 05ba4fe82d..c4869ab48d 100644
--- a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/image_deplot/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/image_deplot/README.md
@@ -1,5 +1,9 @@
 # Image Tabular Chart Loader (Deplot)
 
+```bash
+pip install llama-index-readers-file
+```
+
 This loader captions an image file containing a tabular chart (bar chart, line charts) using deplot.
 
 ## Usage
@@ -8,7 +12,7 @@ To use this loader, you need to pass in a `Path` to a local file.
 
 ```python
 from pathlib import Path
-from llama_hub.file.image_deplot import ImageTabularChartReader
+from llama_index.readers.file import ImageTabularChartReader
 
 loader = ImageTabularChartReader()
 documents = loader.load_data(file=Path("./image.png"))
diff --git a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/paged_csv/README.md b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/paged_csv/README.md
index e46a6d4c9a..a5a0eae2e0 100644
--- a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/paged_csv/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/paged_csv/README.md
@@ -1,5 +1,9 @@
 # Paged CSV Loader
 
+```bash
+pip install llama-index-readers-file
+```
+
 This loader extracts the text from a local .csv file by formatting each row in an LLM-friendly way and inserting it into a separate Document. A single local file is passed in each time you call `load_data`. For example, a Document might look like:
 
 ```
@@ -15,9 +19,8 @@ To use this loader, you need to pass in a `Path` to a local file.
 
 ```python
 from pathlib import Path
-from llama_index.core.readers import download_loader
 
-PagedCSVReader = download_loader("PagedCSVReader")
+from llama_index.readers.file import PagedCSVReader
 
 loader = PagedCSVReader(encoding="utf-8")
 documents = loader.load_data(file=Path("./transactions.csv"))
diff --git a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/pymu_pdf/README.md b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/pymu_pdf/README.md
index 9abd1e99e3..cfde1701d6 100644
--- a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/pymu_pdf/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/pymu_pdf/README.md
@@ -1,6 +1,10 @@
 # PyMuPDF Loader
 
-This loader extracts text from a local PDF file using the `PyMuPDF` Python library. This is the fastest among all other PDF parsing options available in `llama_hub`. If `metadata` is passed as True while calling `load` function; extracted documents will include basic metadata such as page numbers, file path and total number of pages in pdf.
+```bash
+pip install llama-index-readers-file
+```
+
+This loader extracts text from a local PDF file using the `PyMuPDF` Python library. If `metadata` is passed as True while calling the `load` function, extracted documents will include basic metadata such as page numbers, file path, and the total number of pages in the pdf.
 
 ## Usage
 
@@ -8,9 +12,8 @@ To use this loader, you need to pass file path of the local file as string or `P
 
 ```python
 from pathlib import Path
-from llama_index import download_loader
 
-PyMuPDFReader = download_loader("PyMuPDFReader")
+from llama_index.readers.file import PyMuPDFReader
 
 loader = PyMuPDFReader()
 documents = loader.load_data(file_path=Path("./article.pdf"), metadata=True)
diff --git a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/unstructured/README.md b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/unstructured/README.md
index 5a59f69ab6..a08ad57e71 100644
--- a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/unstructured/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/unstructured/README.md
@@ -1,5 +1,9 @@
 # Unstructured.io File Loader
 
+```bash
+pip install llama-index-readers-file
+```
+
 This loader extracts the text from a variety of unstructured text files using [Unstructured.io](https://github.com/Unstructured-IO/unstructured). Currently, the file extensions that are supported are `.txt`, `.docx`, `.pptx`, `.jpg`, `.png`, `.eml`, `.html`, and `.pdf` documents. A single local file is passed in each time you call `load_data`.
 
 Check out their documentation to see more details, but notably, this enables you to parse the unstructured data of many use-cases. For example, you can download the 10-K SEC filings of public companies (e.g. [Coinbase](https://www.sec.gov/ix?doc=/Archives/edgar/data/0001679788/000167978822000031/coin-20211231.htm)), and feed it directly into this loader without worrying about cleaning up the formatting or HTML tags.
@@ -10,7 +14,7 @@ To use this loader, you need to pass in a `Path` to a local file. Optionally, yo
 
 ```python
 from pathlib import Path
-from llama_hub.file.unstructured import UnstructuredReader
+from llama_index.readers.file import UnstructuredReader
 
 loader = UnstructuredReader()
 documents = loader.load_data(file=Path("./10k_filing.html"))
@@ -20,10 +24,9 @@ You can also easily use this loader in conjunction with `SimpleDirectoryReader`
 
 ```python
 from pathlib import Path
-from llama_index import download_loader
-from llama_index import SimpleDirectoryReader
+from llama_index.core import SimpleDirectoryReader
 
-UnstructuredReader = download_loader("UnstructuredReader")
+from llama_index.readers.file import UnstructuredReader
 
 dir_reader = SimpleDirectoryReader(
     "./data",
diff --git a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/xml/README.md b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/xml/README.md
index c7860ddc0e..1fa8139080 100644
--- a/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/xml/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/xml/README.md
@@ -1,5 +1,9 @@
 # XML Loader
 
+```bash
+pip install llama-index-readers-file
+```
+
 This loader extracts the text from a local XML file. A single local file is passed in each time you call `load_data`.
 
 ## Usage
@@ -8,12 +12,11 @@ To use this loader, you need to pass in a `Path` to a local file.
 
 ```python
 from pathlib import Path
-from llama_index import download_loader
 
-XMLReader = download_loader("XMLReader")
+from llama_index.readers.file import XMLReader
 
 loader = XMLReader()
 documents = loader.load_data(file=Path("../example.xml"))
 ```
 
-This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent. See [here](https://github.com/run-llama/llama-hub/tree/main/llama_hub) for examples.
+This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
diff --git a/llama-index-integrations/readers/llama-index-readers-firebase-realtimedb/README.md b/llama-index-integrations/readers/llama-index-readers-firebase-realtimedb/README.md
index ad5ea2f6e4..0894b91d8a 100644
--- a/llama-index-integrations/readers/llama-index-readers-firebase-realtimedb/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-firebase-realtimedb/README.md
@@ -1,5 +1,9 @@
 # Firebase Realtime Database Loader
 
+```bash
+pip install llama-index-readers-firebase-realtimedb
+```
+
 This loader retrieves documents from Firebase Realtime Database. The user specifies the Firebase Realtime Database URL and, optionally, the path to a service account key file for authentication.
 
 ## Usage
@@ -7,10 +11,8 @@ This loader retrieves documents from Firebase Realtime Database. The user specif
 Here's an example usage of the FirebaseRealtimeDatabaseReader.
 
 ```python
-from llama_index import download_loader
-
-FirebaseRealtimeDatabaseReader = download_loader(
-    "FirebaseRealtimeDatabaseReader"
+from llama_index.readers.firebase_realtimedb import (
+    FirebaseRealtimeDatabaseReader,
 )
 
 database_url = "<database_url>"
@@ -20,4 +22,4 @@ reader = FirebaseRealtimeDatabaseReader(database_url, service_account_key_path)
 documents = reader.load_data(path)
 ```
 
-This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent. See [here](https://github.com/emptycrown/llama-hub/tree/main) for examples.
+This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
diff --git a/llama-index-integrations/readers/llama-index-readers-firestore/README.md b/llama-index-integrations/readers/llama-index-readers-firestore/README.md
index fb3d9b3329..05cad520ae 100644
--- a/llama-index-integrations/readers/llama-index-readers-firestore/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-firestore/README.md
@@ -1,5 +1,9 @@
 # Firestore Loader
 
+```bash
+pip install llama-index-readers-firestore
+```
+
 This loader loads from a Firestore collection or a specific document from Firestore. The loader assumes your project already has the google cloud credentials loaded. To find out how to set up credentials, [see here](https://cloud.google.com/docs/authentication/provide-credentials-adc).
 
 ## Usage
@@ -9,9 +13,8 @@ To initialize the loader, provide the project-id of the google cloud project.
 ## Initializing the reader
 
 ```python
-from llama_index import download_loader
+from llama_index.readers.firestore import FirestoreReader
 
-FirestoreReader = download_loader("FirestoreReader")
 reader = FirestoreReader(project_id="<Your Project ID>")
 ```
 
diff --git a/llama-index-integrations/readers/llama-index-readers-genius/README.md b/llama-index-integrations/readers/llama-index-readers-genius/README.md
index cce1ff9105..5ebf06cc78 100644
--- a/llama-index-integrations/readers/llama-index-readers-genius/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-genius/README.md
@@ -1,5 +1,9 @@
 # LlamaIndex Readers Integration: Genius
 
+```bash
+pip install llama-index-readers-genius
+```
+
 This loader connects to the Genius API and loads lyrics, metadata, and album art into `Documents`.
 
 As a prerequisite, you will need to register with [Genius API](https://genius.com/api-clients) and create an app in order to get a `client_id` and a `client_secret`. You should then set a `redirect_uri` for the app. The `redirect_uri` does not need to be functional. You should then generate an access token as an instantiator for the GeniusReader.
@@ -60,9 +64,7 @@ Here's an example usage of the GeniusReader. It will retrieve songs that match s
 - **Returns**: List of `Document` objects with song lyrics.
 
 ```python
-from llama_index.core.readers import download_loader
-
-GeniusReader = download_loader("GeniusReader")
+from llama_index.readers.genius import GeniusReader
 
 access_token = "your_generated_access_token"
 
@@ -79,7 +81,7 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ```python
 from llama_index.core import VectorStoreIndex, download_loader
 
-GeniusReader = download_loader("GeniusReader")
+from llama_index.readers.genius import GeniusReader
 
 access_token = "your_generated_access_token"
 
diff --git a/llama-index-integrations/readers/llama-index-readers-gpt-repo/README.md b/llama-index-integrations/readers/llama-index-readers-gpt-repo/README.md
index 286383911b..2609090e89 100644
--- a/llama-index-integrations/readers/llama-index-readers-gpt-repo/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-gpt-repo/README.md
@@ -1,5 +1,9 @@
 # GPT Repository Loader
 
+```bash
+pip install llama-index-readers-gpt-repo
+```
+
 This loader is an adaptation of https://github.com/mpoon/gpt-repository-loader
 to LlamaHub. Full credit goes to mpoon for coming up with this!
 
@@ -8,9 +12,7 @@ to LlamaHub. Full credit goes to mpoon for coming up with this!
 To use this loader, you need to pass in a path to a local Git repository
 
 ```python
-from llama_index import download_loader
-
-GPTRepoReader = download_loader("GPTRepoReader")
+from llama_index.readers.gpt_repo import GPTRepoReader
 
 loader = GPTRepoReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-graphdb-cypher/README.md b/llama-index-integrations/readers/llama-index-readers-graphdb-cypher/README.md
index e39cf1bfcf..ee59f64a20 100644
--- a/llama-index-integrations/readers/llama-index-readers-graphdb-cypher/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-graphdb-cypher/README.md
@@ -1,5 +1,9 @@
 # Graph Database Cypher Loader
 
+```bash
+pip install llama-index-readers-graphdb-cypher
+```
+
 This loader populates documents from results of Cypher queries from a Graph database endpoint.
 The user specifies a GraphDB endpoint URL with optional credentials to initialize the reader.
 By declaring the Cypher query and optional parameters the loader can fetch the nested result docs.
@@ -14,10 +18,9 @@ Here's an example usage of the `GraphDBCypherReader`.
 You can test out queries directly with the Neo4j labs demo server: demo.neo4jlabs.com or with a free instance https://neo4j.com/aura
 
 ```python
-from llama_index import download_loader
 import os
 
-GraphDBCypherReader = download_loader("GraphDBCypherReader")
+from llama_index.readers.graphdb_cypher import GraphDBCypherReader
 
 uri = "neo4j+s://demo.neo4jlabs.com"
 username = "stackoverflow"
diff --git a/llama-index-integrations/readers/llama-index-readers-graphql/README.md b/llama-index-integrations/readers/llama-index-readers-graphql/README.md
index 4d77971922..adc08fb338 100644
--- a/llama-index-integrations/readers/llama-index-readers-graphql/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-graphql/README.md
@@ -1,5 +1,9 @@
 # GraphQL Loader
 
+```bash
+pip install llama-index-readers-graphql
+```
+
 This loader loads documents via GraphQL queries from a GraphQL endpoint.
 The user specifies a GraphQL endpoint URL with optional credentials to initialize the reader.
 By declaring the GraphQL query and optional variables (parameters) the loader can fetch the nested result docs.
@@ -10,10 +14,9 @@ Here's an example usage of the GraphQLReader.
 You can test out queries directly [on the site](https://countries.trevorblades.com/)
 
 ```python
-from llama_index import download_loader
 import os
 
-GraphQLReader = download_loader("GraphQLReader")
+from llama_index.readers.graphql import GraphQLReader
 
 uri = "https://countries.trevorblades.com/"
 headers = {}
diff --git a/llama-index-integrations/readers/llama-index-readers-guru/README.md b/llama-index-integrations/readers/llama-index-readers-guru/README.md
index 147a5ef9e6..d580cc6032 100644
--- a/llama-index-integrations/readers/llama-index-readers-guru/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-guru/README.md
@@ -1,5 +1,9 @@
 # Guru Loader
 
+```bash
+pip install llama-index-readers-guru
+```
+
 This loader loads documents from [Guru](https://www.getguru.com/). The user specifies a username and api key to initialize the GuruReader.
 
 Note this is not your password. You need to create a new api key in the admin tab of the portal.
@@ -9,9 +13,7 @@ Note this is not your password. You need to create a new api key in the admin ta
 Here's an example usage of the GuruReader.
 
 ```python
-from llama_index import download_loader
-
-GuruReader = download_loader("GuruReader")
+from llama_index.readers.guru import GuruReader
 
 reader = GuruReader(username="<GURU_USERNAME>", api_key="<GURU_API_KEY>")
 
diff --git a/llama-index-integrations/readers/llama-index-readers-hatena-blog/README.md b/llama-index-integrations/readers/llama-index-readers-hatena-blog/README.md
index 777749cb60..ef17a2892f 100644
--- a/llama-index-integrations/readers/llama-index-readers-hatena-blog/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-hatena-blog/README.md
@@ -1,5 +1,9 @@
 # Hatena Blog Loader
 
+```bash
+pip install llama-index-readers-hatena-blog
+```
+
 This loader fetches article from your own [Hatena Blog](https://hatenablog.com/) blog posts using the AtomPub API.
 
 You can get AtomPub info from the admin page after logging into Hatena Blog.
@@ -9,10 +13,9 @@ You can get AtomPub info from the admin page after logging into Hatena Blog.
 Here's an example usage of the HatenaBlogReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-HatenaBlogReader = download_loader("HatenaBlogReader")
+from llama_index.readers.hatena_blog import HatenaBlogReader
 
 root_endpoint = os.getenv("ATOM_PUB_ROOT_ENDPOINT")
 api_key = os.getenv("ATOM_PUB_API_KEY")
diff --git a/llama-index-integrations/readers/llama-index-readers-hive/README.md b/llama-index-integrations/readers/llama-index-readers-hive/README.md
index 39e721c1c0..dd9be6afe8 100644
--- a/llama-index-integrations/readers/llama-index-readers-hive/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-hive/README.md
@@ -1,5 +1,9 @@
 # Hive Loader
 
+```bash
+pip install llama-index-readers-hive
+```
+
 The Hive Loader returns a set of texts corresponding to documents from Hive based on the customized query.
 The user initializes the loader with Hive connection args and then using query to fetch data from Hive.
 
@@ -8,9 +12,7 @@ The user initializes the loader with Hive connection args and then using query t
 Here's an example usage of the hiveReader to load 100 documents.
 
 ```python
-from llama_index import download_loader
-
-HiveReader = download_loader("HiveReader")
+from llama_index.readers.hive import HiveReader
 
 reader = HiveReader(
     host="localhost",
@@ -24,4 +26,4 @@ query = "SELECT * FROM p1 LIMIT 100"
 documents = reader.load_data(query=query)
 ```
 
-This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent. See [here](https://github.com/run-llama/llama-hub/tree/main/llama_hub) for examples.
+This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
diff --git a/llama-index-integrations/readers/llama-index-readers-hubspot/README.md b/llama-index-integrations/readers/llama-index-readers-hubspot/README.md
index d7682e2437..0660c3369f 100644
--- a/llama-index-integrations/readers/llama-index-readers-hubspot/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-hubspot/README.md
@@ -1,5 +1,9 @@
 # Hubspot Loader
 
+```bash
+pip install llama-index-readers-hubspot
+```
+
 This loader loads documents from Hubspot. The user specifies an access token to initialize the HubspotReader.
 
 At the moment, this loader only supports access token authentication. To obtain an access token, you will need to create a private app by following instructions [here](https://developers.hubspot.com/docs/api/private-apps).
@@ -9,10 +13,9 @@ At the moment, this loader only supports access token authentication. To obtain
 Here's an example usage of the HubspotReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-HubspotReader = download_loader("HubspotReader")
+from llama_index.readers.hubspot import HubspotReader
 
 reader = HubspotReader("<HUBSPOT_ACCESS_TOKEN>")
 documents = reader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-huggingface-fs/README.md b/llama-index-integrations/readers/llama-index-readers-huggingface-fs/README.md
index bcfc874039..ebc6ece29e 100644
--- a/llama-index-integrations/readers/llama-index-readers-huggingface-fs/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-huggingface-fs/README.md
@@ -1,5 +1,9 @@
 # Hugging Face FS Loader
 
+```bash
+pip install llama-index-readers-huggingface-fs
+```
+
 This loader uses Hugging Face Hub's Filesystem API (> 0.14) to
 load datasets.
 
@@ -12,9 +16,8 @@ To use this loader, you need to pass in a path to a Hugging Face dataset.
 
 ```python
 from pathlib import Path
-from llama_index import download_loader
 
-HuggingFaceFSReader = download_loader("HuggingFaceFSReader")
+from llama_index.readers.huggingface_fs import HuggingFaceFSReader
 
 # load documents
 loader = HuggingFaceFSReader()
diff --git a/llama-index-integrations/readers/llama-index-readers-hwp/README.md b/llama-index-integrations/readers/llama-index-readers-hwp/README.md
index 0f45d30d01..a330901c19 100644
--- a/llama-index-integrations/readers/llama-index-readers-hwp/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-hwp/README.md
@@ -1,5 +1,9 @@
 # HWP Loader
 
+```bash
+pip install llama-index-readers-file
+```
+
 This loader reads the HWP file, which is the format of many official documents in South Korea.
 
 ## Usage
@@ -7,7 +11,7 @@ This loader reads the HWP file, which is the format of many official documents i
 To use this loader, you need to pass in a file name. It's fine whether the file is compressed or not.
 
 ```python
-from llama_hub.hangeul import HWPReader
+from llama_index.readers.file import HWPReader
 from pathlib import Path
 
 hwp_path = Path("/path/to/hwp")
diff --git a/llama-index-integrations/readers/llama-index-readers-imdb-review/README.md b/llama-index-integrations/readers/llama-index-readers-imdb-review/README.md
index 844e67ced7..20d8daf266 100644
--- a/llama-index-integrations/readers/llama-index-readers-imdb-review/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-imdb-review/README.md
@@ -1,5 +1,9 @@
 ## IMDB MOVIE REVIEWS LOADER
 
+```bash
+pip install llama-index-readers-imdb-review
+```
+
 This loader fetches all the reviews of a movie or a TV-series from IMDB official site. This loader is working on Windows machine and it requires further debug on Linux. Fixes are on the way
 
 Install the required dependencies
@@ -18,9 +22,7 @@ The IMDB downloader takes in two attributes
 ## Usage
 
 ```python
-from llama_index import download_loader
-
-IMDBReviewsloader = download_loader("IMDBReviews")
+from llama_index.readers.imdb_review import IMDBReviews
 
 loader = IMDBReviews(
     movie_name_year="The Social Network 2010", webdriver_engine="edge"
@@ -47,10 +49,10 @@ This loader can be used with both Langchain and LlamaIndex.
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
-from llama_index import VectorStoreIndex
+from llama_index.core import VectorStoreIndex, download_loader
+
 
-IMDBReviewsloader = download_loader("IMDBReviews")
+from llama_index.readers.imdb_review import IMDBReviews as IMDBReviewsloader
 
 loader = IMDBReviewsloader(
     movie_name_year="The Social Network 2010",
@@ -72,7 +74,6 @@ print(response)
 ### Langchain
 
 ```python
-from llama_index import download_loader
 from langchain.llms import OpenAI
 from langchain.agents.agent_toolkits.pandas import (
     create_pandas_dataframe_agent,
@@ -81,7 +82,7 @@ from langchain.agents import Tool
 from langchain.agents import initialize_agent
 from langchain.chat_models import ChatOpenAI
 
-IMDBReviewsloader = download_loader("IMDBReviews")
+from llama_index.readers.imdb_review import IMDBReviews as IMDBReviewsloader
 
 loader = IMDBReviewsloader(
     movie_name_year="The Social Network 2010",
diff --git a/llama-index-integrations/readers/llama-index-readers-intercom/README.md b/llama-index-integrations/readers/llama-index-readers-intercom/README.md
index 7c6c716319..f15265703a 100644
--- a/llama-index-integrations/readers/llama-index-readers-intercom/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-intercom/README.md
@@ -1,5 +1,9 @@
 # Intercom Loader
 
+```bash
+pip install llama-index-readers-intercom
+```
+
 This loader fetches the text from Intercom help articles using the Intercom API. It also uses the BeautifulSoup library to parse the HTML and extract the text from the articles.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader fetches the text from Intercom help articles using the Intercom API.
 To use this loader, you need to pass in an Intercom account access token.
 
 ```python
-from llama_index import download_loader
-
-IntercomReader = download_loader("IntercomReader")
+from llama_index.readers.intercom import IntercomReader
 
 loader = IntercomReader(intercom_access_token="my_access_token")
 documents = loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-jira/README.md b/llama-index-integrations/readers/llama-index-readers-jira/README.md
index a36d63872b..2e76552ba9 100644
--- a/llama-index-integrations/readers/llama-index-readers-jira/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-jira/README.md
@@ -1,5 +1,9 @@
 # JIRA Reader
 
+```bash
+pip install llama-index-readers-jira
+```
+
 The Jira loader returns a set of issues based on the query provided to the dataloader.
 We can follow two methods to initialize the loader-
 1- basic_auth -> this takes a dict with the following keys
@@ -21,7 +25,7 @@ You can follow this link for more information regarding Oauth2 -> https://develo
 Here's an example of how to use it
 
 ```python
-from llama_hub.jira import JiraReader
+from llama_index.readers.jira import JiraReader
 
 reader = JiraReader(
     email=email, api_token=api_token, server_url="your-jira-server.com"
@@ -32,9 +36,7 @@ documents = reader.load_data(query="project = <your-project>")
 Alternately, you can also use download_loader from llama_index
 
 ```python
-from llama_index import download_loader
-
-JiraReader = download_loader("JiraReader")
+from llama_index.readers.jira import JiraReader
 
 reader = JiraReader(
     email=email, api_token=api_token, server_url="your-jira-server.com"
diff --git a/llama-index-integrations/readers/llama-index-readers-joplin/README.md b/llama-index-integrations/readers/llama-index-readers-joplin/README.md
index bcd7afb585..c8a14e07fd 100644
--- a/llama-index-integrations/readers/llama-index-readers-joplin/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-joplin/README.md
@@ -1,5 +1,9 @@
 # Joplin (Markdown) Loader
 
+```bash
+pip install llama-index-readers-joplin
+```
+
 > [Joplin](https://joplinapp.org/) is an open source note-taking app. Capture your thoughts and securely access them from any device.
 
 This readme covers how to load documents from a `Joplin` database.
@@ -20,10 +24,10 @@ An alternative to this approach is to export the `Joplin`'s note database to Mar
 Here's an example usage of the JoplinReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-JoplinReader = download_loader("JoplinReader")
+from llama_index.readers.joplin import JoplinReader
+
 documents = JoplinReader(
     access_token="<access_token>"
 ).load_data()  # Returns list of documents
diff --git a/llama-index-integrations/readers/llama-index-readers-kaltura/README.md b/llama-index-integrations/readers/llama-index-readers-kaltura/README.md
index 6aba072088..1d5fae146e 100644
--- a/llama-index-integrations/readers/llama-index-readers-kaltura/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-kaltura/README.md
@@ -1,5 +1,9 @@
 # Kaltura eSearch Loader
 
+```bash
+pip install llama-index-readers-kaltura-esearch
+```
+
 This loader reads Kaltura Entries from [Kaltura](https://corp.kaltura.com) based on a Kaltura eSearch API call.
 Search queries can be passed as a pre-defined object of KalturaESearchEntryParams, or through a simple free text query.
 The result is a list of documents containing the Kaltura Entries and Captions json.
@@ -64,9 +68,7 @@ Each dictionary in the response represents a Kaltura media entry, where the keys
 First, instantiate the KalturaReader (aka Kaltura Loader) with your Kaltura configuration credentials:
 
 ```python
-from llama_index import download_loader
-
-KalturaESearchReader = download_loader("KalturaESearchReader")
+from llama_index.readers.kaltura_esearch import KalturaESearchReader
 
 loader = KalturaESearchReader(
     partnerId="INSERT_YOUR_PARTNER_ID",
diff --git a/llama-index-integrations/readers/llama-index-readers-kibela/README.md b/llama-index-integrations/readers/llama-index-readers-kibela/README.md
index 97323863e8..d4a39b1a45 100644
--- a/llama-index-integrations/readers/llama-index-readers-kibela/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-kibela/README.md
@@ -1,5 +1,9 @@
 # Kibela Reader
 
+```bash
+pip install llama-index-readers-kibela
+```
+
 This reader fetches article from your [Kibela](https://kibe.la/) notes using the GraphQL API.
 
 # Usage
@@ -8,7 +12,7 @@ Here's an example of how to use it. You can get your access token from [here](ht
 
 ```python
 import os
-from llama_hub.kibela import KibelaReader
+from llama_index.readers.kibela import KibelaReader
 
 team = os.environ["KIBELA_TEAM"]
 token = os.environ["KIBELA_TOKEN"]
@@ -21,9 +25,8 @@ Alternately, you can also use download_loader from llama_index
 
 ```python
 import os
-from llama_index import download_loader
 
-KibelaReader = download_loader("KibelaReader")
+from llama_index.readers.kibela import KibelaReader
 
 team = os.environ["KIBELA_TEAM"]
 token = os.environ["KIBELA_TOKEN"]
diff --git a/llama-index-integrations/readers/llama-index-readers-lilac/README.md b/llama-index-integrations/readers/llama-index-readers-lilac/README.md
index 0a396b701a..210d23f6bb 100644
--- a/llama-index-integrations/readers/llama-index-readers-lilac/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-lilac/README.md
@@ -1,5 +1,11 @@
 # Lilac reader
 
+```bash
+pip install llama-index-readers-papers
+
+pip install llama-index-readers-lilac
+```
+
 [Lilac](https://lilacml.com/) is an open-source product that helps you analyze, enrich, and clean unstructured data with AI.
 
 It can be used to analyze, clean, structure, and label data that can be used in downstream LlamaIndex and LangChain applications.
@@ -17,11 +23,10 @@ You can use any LlamaIndex loader to load data into Lilac, clean data, and then
 See [this notebook](https://github.com/lilacai/lilac/blob/main/notebooks/LlamaIndexLoader.ipynb) for getting data into Lilac from LlamaHub.
 
 ```python
-from llama_index import download_loader
 import lilac as ll
 
 # See: https://llamahub.ai/l/papers-arxiv
-ArxivReader = download_loader("ArxivReader")
+from llama_index.readers.papers import ArxivReader
 
 loader = ArxivReader()
 documents = loader.load_data(search_query="au:Karpathy")
@@ -49,9 +54,9 @@ ll.start_server(project_dir="./data")
 ### Lilac => LlamaIndex Documents
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
 
-LilacReader = download_loader("LilacReader")
+from llama_index.readers.lilac import LilacReader
 
 loader = LilacReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-linear/README.md b/llama-index-integrations/readers/llama-index-readers-linear/README.md
index 59e14cdc71..3c3a0f97cc 100644
--- a/llama-index-integrations/readers/llama-index-readers-linear/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-linear/README.md
@@ -1,5 +1,9 @@
 # Linear Reader
 
+```bash
+pip install llama-index-readers-linear
+```
+
 The Linear loader returns issue based on the query.
 
 ## Usage
@@ -7,7 +11,7 @@ The Linear loader returns issue based on the query.
 Here's an example of how to use it
 
 ```python
-from llama_hub.linear import LinearReader
+from llama_index.readers.linear import LinearReader
 
 reader = LinearReader(api_key=api_key)
 query = """
@@ -38,9 +42,7 @@ documents = reader.load_data(query=query)
 Alternately, you can also use download_loader from llama_index
 
 ```python
-from llama_index import download_loader
-
-LinearReader = download_loader("LinearReader")
+from llama_index.readers.linear import LinearReader
 
 reader = LinearReader(api_key=api_key)
 query = """
diff --git a/llama-index-integrations/readers/llama-index-readers-macrometa-gdn/README.md b/llama-index-integrations/readers/llama-index-readers-macrometa-gdn/README.md
index e2fd8a41be..a9fae3033b 100644
--- a/llama-index-integrations/readers/llama-index-readers-macrometa-gdn/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-macrometa-gdn/README.md
@@ -1,5 +1,9 @@
 # Macrometa GDN Loader
 
+```bash
+pip install llama-index-readers-macrometa-gdn
+```
+
 This loader takes in a Macrometa federation URL, API key, and collection name and returns a list of vectors.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader takes in a Macrometa federation URL, API key, and collection name an
 To use this loader, you need to pass the URL and API key through the class constructor, and then load the data using an array of collection names.
 
 ```python
-from llama_index import download_loader
-
-MacrometaGDNReader = download_loader("MacrometaGDNReader")
+from llama_index.readers.macrometa_gdn import MacrometaGDNReader
 
 collections = ["test_collection"]
 loader = MacrometaGDNReader(url="https://api-macrometa.io", apikey="test")
diff --git a/llama-index-integrations/readers/llama-index-readers-mangadex/README.md b/llama-index-integrations/readers/llama-index-readers-mangadex/README.md
index 685c59af9f..893aadd634 100644
--- a/llama-index-integrations/readers/llama-index-readers-mangadex/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-mangadex/README.md
@@ -1,13 +1,15 @@
 # MangaDex Loader
 
+```bash
+pip install llama-index-readers-mangadex
+```
+
 This loader fetches information from the MangaDex API, by manga title.
 
 ## Usage
 
 ```python
-from llama_index import download_loader
-
-MangaDexReader = download_loader("MangaDexReader")
+from llama_index.readers.mangadex import MangaDexReader
 
 loader = MangaDexReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-mangoapps-guides/README.md b/llama-index-integrations/readers/llama-index-readers-mangoapps-guides/README.md
index 87e03688c8..51f392bd8c 100644
--- a/llama-index-integrations/readers/llama-index-readers-mangoapps-guides/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-mangoapps-guides/README.md
@@ -1,5 +1,9 @@
 # MangoppsGuides Loader
 
+```bash
+pip install llama-index-readers-mangoapps-guides
+```
+
 This loader fetches the text from Mangopps Guides.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader fetches the text from Mangopps Guides.
 To use this loader, you need to pass base url of the MangoppsGuides installation (e.g. `https://guides.mangoapps.com/`) and the limit , i.e. max number of links it should crawl
 
 ```python
-from llama_index import download_loader
-
-MangoppsGuidesReader = download_loader("MangoppsGuidesReader")
+from llama_index.readers.mangoapps_guides import MangoppsGuidesReader
 
 loader = MangoppsGuidesReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-maps/README.md b/llama-index-integrations/readers/llama-index-readers-maps/README.md
index 920dfdd45f..47f318eb75 100644
--- a/llama-index-integrations/readers/llama-index-readers-maps/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-maps/README.md
@@ -1,5 +1,9 @@
 # **_Osmmap Loader_**
 
+```bash
+pip install llama-index-readers-maps
+```
+
 The Osmmap Loader will fetch map data from the [Overpass](https://wiki.openstreetmap.org/wiki/Main_Page) api for a certain place or area. Version **Overpass API 0.7.60** is used by this loader.
 
 The api will provide you with all the **nodes, relations, and ways** for the particular region when you request data for a region or location.
@@ -27,9 +31,7 @@ She requires all the nodes, routes, and relations within a five-kilometer radius
 ### And the code snippet looks like
 
 ```python
-from llama_index import download_loader
-
-MapReader = download_loader("OpenMap")
+from llama_index.readers.maps import OpenMap as MapReader
 
 loader = MapReader()
 documents = loader.load_data(
@@ -46,9 +48,7 @@ documents = loader.load_data(
 - so she search for hospital tag in the [Taginfo](https://taginfo.openstreetmap.org/tags) and she got
 
 ```python
-from llama_index import download_loader
-
-MapReader = download_loader("OpenMap")
+from llama_index.readers.maps import OpenMap as MapReader
 
 loader = MapReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-memos/README.md b/llama-index-integrations/readers/llama-index-readers-memos/README.md
index 85dfd31c08..849e4dd1fe 100644
--- a/llama-index-integrations/readers/llama-index-readers-memos/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-memos/README.md
@@ -1,5 +1,9 @@
 # Memos Loader
 
+```bash
+pip install llama-index-readers-memos
+```
+
 This loader fetches text from self-hosted [memos](https://github.com/usememos/memos).
 
 ## Usage
@@ -7,9 +11,8 @@ This loader fetches text from self-hosted [memos](https://github.com/usememos/me
 To use this loader, you need to specify the host where memos is deployed. If you need to filter, pass the [corresponding parameter](https://github.com/usememos/memos/blob/4fe8476169ecd2fc4b164a25611aae6861e36812/api/memo.go#L76) in `load_data`.
 
 ```python
-from llama_index import download_loader
+from llama_index.readers.memos import MemosReader
 
-MemosReader = download_loader("MemosReader")
 loader = MemosReader("https://demo.usememos.com/")
 documents = loader.load_data({"creatorId": 101})
 ```
diff --git a/llama-index-integrations/readers/llama-index-readers-microsoft-onedrive/README.md b/llama-index-integrations/readers/llama-index-readers-microsoft-onedrive/README.md
index e2df15698b..05a43f3de0 100644
--- a/llama-index-integrations/readers/llama-index-readers-microsoft-onedrive/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-microsoft-onedrive/README.md
@@ -1,5 +1,9 @@
 # Microsoft OneDrive Loader
 
+```bash
+pip install llama-index-readers-microsoft-onedrive
+```
+
 This loader reads files from:
 
 - Microsoft OneDrive Personal [(https://onedrive.live.com/)](https://onedrive.live.com/) and
@@ -61,9 +65,7 @@ For example, the file_id of `https://onedrive.live.com/?cid=0B5AF52BE769DFDE4&id
 #### OneDrive Personal Example Usage:
 
 ```python
-from llama_index import download_loader
-
-OneDriveReader = download_loader("OneDriveReader")
+from llama_index.readers.microsoft_onedrive import OneDriveReader
 
 # User Authentication flow: Replace client id with your own id
 loader = OneDriveReader(client_id="82ee706e-2439-47fa-877a-95048ead9318")
@@ -108,9 +110,7 @@ For example, the path of file "demo_doc.docx" within test subfolder from previou
 #### OneDrive For Business Example Usage:
 
 ```python
-from llama_index import download_loader
-
-OneDriveReader = download_loader("OneDriveReader")
+from llama_index.readers.microsoft_onedrive import OneDriveReader
 
 loader = OneDriveReader(
     client_id="82ee706e-2439-47fa-877a-95048ead9318",
diff --git a/llama-index-integrations/readers/llama-index-readers-microsoft-outlook/README.md b/llama-index-integrations/readers/llama-index-readers-microsoft-outlook/README.md
index 3869c7a6cd..c2f556fb67 100644
--- a/llama-index-integrations/readers/llama-index-readers-microsoft-outlook/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-microsoft-outlook/README.md
@@ -1,5 +1,9 @@
 # Outlook Local Calendar Loader
 
+```bash
+pip install llama-index-readers-microsoft-outlook
+```
+
 This loader reads your past and upcoming Calendar events from your local Outlook .ost or .pst and parses the relevant info into `Documents`.
 
 It runs on Windows only and has only been tested with Windows 11. It has been designed to have a supoerset of the functionality of the Google Calendar reader.
@@ -11,9 +15,7 @@ Here's an example usage of the OutlookCalendar Reader. It will retrieve up to 10
 It always returns Start, End, Subject, Location, and Organizer attributes and optionally returns additional attributes specified in the `more_attributes` parameter, which, if specified, must be a list of strings eg. ['Body','someotherattribute',...]. Attributes which don't exist in a calendar entry are ignored without warning.
 
 ```python
-from llama_index import download_loader
-
-OutlookCalendarReader = download_loader("OutlookLocalCalendarReader")
+from llama_index.readers.microsoft_outlook import OutlookLocalCalendarReader as OutlookCalendarReader
 
 loader = OutlookCalendarReader()
 documents = loader.load_data()
@@ -26,9 +28,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
 
-OutlookCalendarReader = download_loader("OutlookLocalCalendarReader")
+from llama_index.readers.microsoft_outlook import OutlookLocalCalendarReader as OutlookCalendarReader
 
 loader = OutlookCalendarReader(
     start_date="2022-01-01", number_of_documents=1000
diff --git a/llama-index-integrations/readers/llama-index-readers-microsoft-sharepoint/README.md b/llama-index-integrations/readers/llama-index-readers-microsoft-sharepoint/README.md
index e382b0f013..9ea404eae4 100644
--- a/llama-index-integrations/readers/llama-index-readers-microsoft-sharepoint/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-microsoft-sharepoint/README.md
@@ -1,5 +1,9 @@
 # Microsoft SharePoint Reader
 
+```bash
+pip install llama-index-readers-microsoft-sharepoint
+```
+
 The loader loads the files from a folder in sharepoint site.
 
 It also supports traversing recursively through the sub-folders.
@@ -27,9 +31,7 @@ If the files are present in the `Test` folder in SharePoint Site under `root` di
 ![FilePath](file_path_info.png)
 
 ```python
-from llama_index import download_loader
-
-SharePointLoader = download_loader("SharePointReader")
+from llama_index.readers.microsoft_sharepoint import SharePointReader
 
 loader = SharePointLoader(
     client_id="<Client ID of the app>",
diff --git a/llama-index-integrations/readers/llama-index-readers-minio/README.md b/llama-index-integrations/readers/llama-index-readers-minio/README.md
index d0bb5d20dd..6fa3d65e70 100644
--- a/llama-index-integrations/readers/llama-index-readers-minio/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-minio/README.md
@@ -6,4 +6,4 @@
 
 ## Import
 
-`from llama_index.readers.minio import MinioReader, BotoMinioReader`
+from llama_index.readers.minio import MinioReader, BotoMinioReader
diff --git a/llama-index-integrations/readers/llama-index-readers-minio/llama_index/README.md b/llama-index-integrations/readers/llama-index-readers-minio/llama_index/README.md
index e17e81f4d6..3ae38a2382 100644
--- a/llama-index-integrations/readers/llama-index-readers-minio/llama_index/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-minio/llama_index/README.md
@@ -13,8 +13,6 @@ To use this loader, you need to pass in the name of your Minio Bucket. After tha
 Otherwise, you may specify a prefix if you only want to parse certain files in the Bucket, or a subdirectory.
 
 ```python
-from llama_index import download_loader
-
 MinioReader = download_loader("BotoMinioReader")
 loader = MinioReader(
     bucket="documents",
@@ -40,8 +38,6 @@ Otherwise, you may specify a prefix if you only want to parse certain files in t
 You can now use the client with a TLS-secured MinIO instance (`minio_secure=True`), even if server's certificate isn't trusted (`minio_cert_check=False`).
 
 ```python
-from llama_index import download_loader
-
 MinioReader = download_loader("MinioReader")
 loader = MinioReader(
     bucket="documents",
diff --git a/llama-index-integrations/readers/llama-index-readers-mondaydotcom/README.md b/llama-index-integrations/readers/llama-index-readers-mondaydotcom/README.md
index 47e4b14b94..fd9d1f696f 100644
--- a/llama-index-integrations/readers/llama-index-readers-mondaydotcom/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-mondaydotcom/README.md
@@ -1,5 +1,9 @@
 # Monday Loader
 
+```bash
+pip install llama-index-readers-mondaydotcom
+```
+
 This loader loads data from monday.com. The user specifies an API token to initialize the MondayReader. They then specify a monday.com board id to load in the corresponding Document objects.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader loads data from monday.com. The user specifies an API token to initi
 Here's an example usage of the MondayReader.
 
 ```python
-from llama_index import download_loader
-
-MondayReader = download_loader("MondayReader")
+from llama_index.readers.mondaydotcom import MondayReader
 
 reader = MondayReader("<monday_api_token>")
 documents = reader.load_data("<board_id: int>")
diff --git a/llama-index-integrations/readers/llama-index-readers-nougat-ocr/README.md b/llama-index-integrations/readers/llama-index-readers-nougat-ocr/README.md
index d38c41f7a0..56b3285ae3 100644
--- a/llama-index-integrations/readers/llama-index-readers-nougat-ocr/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-nougat-ocr/README.md
@@ -1,5 +1,9 @@
 # Nougat OCR loader
 
+```bash
+pip install llama-index-readers-nougat-ocr
+```
+
 This loader reads the equations, symbols, and tables included in the PDF.
 
 Users can input the path of the academic PDF document `file` which they want to parse. This OCR understands LaTeX math and tables.
@@ -9,7 +13,7 @@ Users can input the path of the academic PDF document `file` which they want to
 Here's an example usage of the PDFNougatOCR.
 
 ```python
-from llama_hub.nougat_ocr import PDFNougatOCR
+from llama_index.readers.nougat_ocr import PDFNougatOCR
 
 reader = PDFNougatOCR()
 
diff --git a/llama-index-integrations/readers/llama-index-readers-openalex/README.md b/llama-index-integrations/readers/llama-index-readers-openalex/README.md
index a171d0697b..3ba2fb3b3b 100644
--- a/llama-index-integrations/readers/llama-index-readers-openalex/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-openalex/README.md
@@ -1,11 +1,15 @@
 # OpenAlex Reader
 
+```bash
+pip install llama-index-readers-openalex
+```
+
 This loader will search for papers in OpenAlex and load them in llama-index. The main advantage of using OpenAlex is that you can search the full-text for Open Access papers as well.
 
 ## Usage
 
 ```python
-from llama_hub.openalex_loader import OpenAlexReader
+from llama_index.readers.openalex import OpenAlexReader
 
 openalex_reader = OpenAlexReader(email="shauryr@gmail.com")
 query = "biases in large language models"
diff --git a/llama-index-integrations/readers/llama-index-readers-opendal/README.md b/llama-index-integrations/readers/llama-index-readers-opendal/README.md
index 86cf7176ef..8890a2cc0a 100644
--- a/llama-index-integrations/readers/llama-index-readers-opendal/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-opendal/README.md
@@ -1,5 +1,9 @@
 # OpenDAL Loaders
 
+```bash
+pip install llama-index-readers-opendal
+```
+
 ## Base OpendalReader
 
 This loader parses any file via [Apache OpenDAL](https://github.com/apache/incubator-opendal).
@@ -11,9 +15,7 @@ All files are temporarily downloaded locally and subsequently parsed with `Simpl
 `OpendalReader` can read data from any supported storage services including `s3`, `azblob`, `gcs` and so on.
 
 ```python
-from llama_index import download_loader
-
-OpendalReader = download_loader("OpendalReader")
+from llama_index.readers.opendal import OpendalReader
 
 loader = OpendalReader(
     scheme="s3",
@@ -40,9 +42,7 @@ All files are temporarily downloaded locally and subsequently parsed with `Simpl
 ### Usage
 
 ```python
-from llama_index import download_loader
-
-OpendalAzblobReader = download_loader("OpendalAzblobReader")
+from llama_index.readers.opendal import OpendalAzblobReader
 
 loader = OpendalAzblobReader(
     container="container",
@@ -69,9 +69,7 @@ All files are temporarily downloaded locally and subsequently parsed with `Simpl
 ### Usage
 
 ```python
-from llama_index import download_loader
-
-OpendalGcsReader = download_loader("OpendalGcsReader")
+from llama_index.readers.opendal import OpendalGcsReader
 
 loader = OpendalGcsReader(
     bucket="bucket",
@@ -99,10 +97,6 @@ All files are temporarily downloaded locally and subsequently parsed with `Simpl
 ### Usage
 
 ```python
-from llama_index import download_loader
-
-OpendalS3Reader = download_loader("OpendalS3Reader")
-
 loader = OpendalS3Reader(
     bucket="bucket",
     path="path/to/data/",
diff --git a/llama-index-integrations/readers/llama-index-readers-opensearch/README.md b/llama-index-integrations/readers/llama-index-readers-opensearch/README.md
index 2af8cd6372..b9a172008b 100644
--- a/llama-index-integrations/readers/llama-index-readers-opensearch/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-opensearch/README.md
@@ -1,5 +1,9 @@
 # Opensearch Loader
 
+```bash
+pip install llama-index-readers-opensearch
+```
+
 The Opensearch Loader returns a set of texts corresponding to documents retrieved from an Opensearch index.
 The user initializes the loader with an Opensearch index. They then pass in a field, and optionally a JSON query DSL object to fetch the fields they want.
 
@@ -8,9 +12,7 @@ The user initializes the loader with an Opensearch index. They then pass in a fi
 Here's an example usage of the OpensearchReader to load 100 documents.
 
 ```python
-from llama_index import download_loader
-
-OpensearchReader = download_loader("OpensearchReader")
+from llama_index.readers.opensearch import OpensearchReader
 
 reader = OpensearchReader(
     host="localhost",
diff --git a/llama-index-integrations/readers/llama-index-readers-pandas-ai/README.md b/llama-index-integrations/readers/llama-index-readers-pandas-ai/README.md
index 7c5dc5d16c..15124e6e2f 100644
--- a/llama-index-integrations/readers/llama-index-readers-pandas-ai/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-pandas-ai/README.md
@@ -1,5 +1,9 @@
 # Pandas AI Loader
 
+```bash
+pip install llama-index-readers-pandas-ai
+```
+
 This loader is a light wrapper around the `PandasAI` Python package.
 
 See here: https://github.com/gventuri/pandas-ai.
@@ -10,7 +14,6 @@ you can choose to load in `Document` objects via `load_data`.
 ## Usage
 
 ```python
-from llama_index import download_loader
 from pandasai.llm.openai import OpenAI
 import pandas as pd
 
@@ -47,7 +50,7 @@ df = pd.DataFrame(
 
 llm = OpenAI()
 
-PandasAIReader = download_loader("PandasAIReader")
+from llama_index.readers.pandas_ai import PandasAIReader
 
 # use run_pandas_ai directly
 # set is_conversational_answer=False to get parsed output
diff --git a/llama-index-integrations/readers/llama-index-readers-papers/README.md b/llama-index-integrations/readers/llama-index-readers-papers/README.md
index 54d66b6bc3..7dbebcfb74 100644
--- a/llama-index-integrations/readers/llama-index-readers-papers/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-papers/README.md
@@ -1,5 +1,9 @@
 # Papers Loaders
 
+```bash
+pip install llama-index-readers-papers
+```
+
 ## Arxiv Papers Loader
 
 This loader fetches the text from the most relevant scientific papers on Arxiv specified by a search query (e.g. "Artificial Intelligence"). For each paper, the abstract is extracted and put in a separate document. The search query may be any string, Arxiv paper id, or a general Arxiv query string (see the full list of capabilities [here](https://info.arxiv.org/help/api/user-manual.html#query_details)).
@@ -9,9 +13,7 @@ This loader fetches the text from the most relevant scientific papers on Arxiv s
 To use this loader, you need to pass in the search query. You may also optionally specify a local directory to temporarily store the paper PDFs (they are deleted automatically) and the maximum number of papers you want to parse for your search query (default is 10).
 
 ```python
-from llama_index import download_loader
-
-ArxivReader = download_loader("ArxivReader")
+from llama_index.readers.papers import ArxivReader
 
 loader = ArxivReader()
 documents = loader.load_data(search_query="au:Karpathy")
@@ -20,9 +22,7 @@ documents = loader.load_data(search_query="au:Karpathy")
 Alternatively, if you would like to load papers and abstracts separately:
 
 ```python
-from llama_index import download_loader
-
-ArxivReader = download_loader("ArxivReader")
+from llama_index.readers.papers import ArxivReader
 
 loader = ArxivReader()
 documents, abstracts = loader.load_papers_and_abstracts(
@@ -41,9 +41,7 @@ This loader fetches the text from the most relevant scientific papers on Pubmed
 To use this loader, you need to pass in the search query. You may also optionally specify the maximum number of papers you want to parse for your search query (default is 10).
 
 ```python
-from llama_index import download_loader
-
-PubmedReader = download_loader("PubmedReader")
+from llama_index.readers.papers import PubmedReader
 
 loader = PubmedReader()
 documents = loader.load_data(search_query="amyloidosis")
diff --git a/llama-index-integrations/readers/llama-index-readers-patentsview/README.md b/llama-index-integrations/readers/llama-index-readers-patentsview/README.md
index 127d653495..c283ffa887 100644
--- a/llama-index-integrations/readers/llama-index-readers-patentsview/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-patentsview/README.md
@@ -1,5 +1,9 @@
 # Patentsview Loader
 
+```bash
+pip install llama-index-readers-patentsview
+```
+
 This loader loads patent abstract from `a list of patent numbers` with API provided by [Patentsview](https://patentsview.org/).
 
 ## Usage
@@ -7,9 +11,8 @@ This loader loads patent abstract from `a list of patent numbers` with API provi
 Here'a an example usage of PatentsviewReader.
 
 ```python
-from llama_index import download_loader
+from llama_index.readers.patentsview import PatentsviewReader
 
-PatentsviewReader = download_loader("PatentsviewReader")
 loader = PatentsviewReader()
 patents = ["8848839", "10452978"]
 abstracts = loader.load_data(patents)
diff --git a/llama-index-integrations/readers/llama-index-readers-pdb/README.md b/llama-index-integrations/readers/llama-index-readers-pdb/README.md
index 9997c20ac4..b82f352b14 100644
--- a/llama-index-integrations/readers/llama-index-readers-pdb/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-pdb/README.md
@@ -1,5 +1,9 @@
 # Protein Data Bank (PDB) publication Loader
 
+```bash
+pip install llama-index-readers-pdb
+```
+
 This loader fetches the abstract of PDB entries using the RCSB (Research Collaboratory for Structural Bioinformatics) or EBI (European Bioinformatics Institute) REST api.
 
 ## Usage
@@ -7,7 +11,7 @@ This loader fetches the abstract of PDB entries using the RCSB (Research Collabo
 To use this loader, simply pass an array of PDB ids into `load_data`:
 
 ```python
-from llama_hub.pdb import PdbAbstractReader
+from llama_index.readers.pdb import PdbAbstractReader
 
 loader = PdbAbstractReader()
 documents = loader.load_data(pdb_id=["1cbs"])
diff --git a/llama-index-integrations/readers/llama-index-readers-pdf-table/README.md b/llama-index-integrations/readers/llama-index-readers-pdf-table/README.md
index bfe9e0271a..5036097ec1 100644
--- a/llama-index-integrations/readers/llama-index-readers-pdf-table/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-pdf-table/README.md
@@ -1,5 +1,9 @@
 # PDF Table Loader
 
+```bash
+pip install llama-index-readers-pdf-table
+```
+
 This loader reads the tables included in the PDF.
 
 Users can input the PDF `file` and the `pages` from which they want to extract tables, and they can read the tables included on those pages.
@@ -10,7 +14,7 @@ Here's an example usage of the PDFTableReader.
 `pages` parameter is the same as camelot's `pages`. Therefore, you can use patterns such as `all`, `1,2,3`, `10-20`, and so on.
 
 ```python
-from llama_hub.pdf_table import PDFTableReader
+from llama_index.readers.pdf_table import PDFTableReader
 from pathlib import Path
 
 reader = PDFTableReader()
diff --git a/llama-index-integrations/readers/llama-index-readers-preprocess/README.md b/llama-index-integrations/readers/llama-index-readers-preprocess/README.md
index ae514fbdf4..ebd34f3fde 100644
--- a/llama-index-integrations/readers/llama-index-readers-preprocess/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-preprocess/README.md
@@ -1,5 +1,9 @@
 # Preprocess Loader
 
+```bash
+pip install llama-index-readers-preprocess
+```
+
 [Preprocess](https://preprocess.co) is an API service that splits any kind of document into optimal chunks of text for use in language model tasks.
 Given documents in input `Preprocess` splits them into chunks of text that respect the layout and semantics of the original document.
 We split the content by taking into account sections, paragraphs, lists, images, data tables, text tables, and slides, and following the content semantics for long texts.
@@ -26,10 +30,9 @@ To chunk a file pass a valid filepath and the reader will start converting and c
 If you want to handle the nodes directly:
 
 ```python
-from llama_index import VectorStoreIndex
-from llama_index import download_loader
+from llama_index.core import VectorStoreIndex
 
-PreprocessReader = download_loader("PreprocessReader")
+from llama_index.readers.preprocess import PreprocessReader
 
 # pass a filepath and get the chunks as nodes
 loader = PreprocessReader(
@@ -45,10 +48,9 @@ query_engine = index.as_query_engine()
 By default load_data() returns a document for each chunk, remember to not apply any splitting to these documents
 
 ```python
-from llama_index import VectorStoreIndex
-from llama_index import download_loader
+from llama_index.core import VectorStoreIndex
 
-PreprocessReader = download_loader("PreprocessReader")
+from llama_index.readers.preprocess import PreprocessReader
 
 # pass a filepath and get the chunks as nodes
 loader = PreprocessReader(
diff --git a/llama-index-integrations/readers/llama-index-readers-rayyan/README.md b/llama-index-integrations/readers/llama-index-readers-rayyan/README.md
index d5dae921ea..05c589fa43 100644
--- a/llama-index-integrations/readers/llama-index-readers-rayyan/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-rayyan/README.md
@@ -1,5 +1,9 @@
 # Rayyan Loader
 
+```bash
+pip install llama-index-readers-rayyan
+```
+
 This loader fetches review articles from [Rayyan](https://www.rayyan.ai/)
 using the [Rayyan SDK](https://github.com/rayyansys/rayyan-python-sdk). All articles
 for a given review are fetched by default unless a filter is specified.
@@ -11,9 +15,8 @@ and optionally the API server URL if different from the default. More details
 about these parameters can be found in the official Rayyan SDK repository.
 
 ```python
-from llama_index import download_loader
+from llama_index.readers.rayyan import RayyanReader
 
-RayyanReader = download_loader("RayyanReader")
 loader = RayyanReader(credentials_path="path/to/rayyan-creds.json")
 ```
 
diff --git a/llama-index-integrations/readers/llama-index-readers-readwise/README.md b/llama-index-integrations/readers/llama-index-readers-readwise/README.md
index 525cfe8b25..9aa461c582 100644
--- a/llama-index-integrations/readers/llama-index-readers-readwise/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-readwise/README.md
@@ -1,5 +1,9 @@
 # Readwise Reader
 
+```bash
+pip install llama-index-readers-readwise
+```
+
 Use Readwise's export API to fetch your highlights from web articles, epubs, pdfs, Kindle, YouTube, and load the resulting text into LLMs.
 
 ## Setup
@@ -12,9 +16,10 @@ Here is an example usage of the Readwise Reader:
 
 ```python
 import os
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
+
+from llama_index.readers.readwise import ReadwiseReader
 
-ReadwiseReader = download_loader("ReadwiseReader")
 token = os.getenv("READWISE_API_KEY")
 loader = ReadwiseReader(api_key=token)
 documents = loader.load_data()
@@ -28,9 +33,10 @@ You can also query for highlights that have been created after a certain time:
 ```python
 import os
 import datetime
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
+
+from llama_index.readers.readwise import ReadwiseReader
 
-ReadwiseReader = download_loader("ReadwiseReader")
 token = os.getenv("READWISE_API_KEY")
 loader = ReadwiseReader(api_key=token)
 seven_days_ago = datetime.datetime.now() - datetime.timedelta(days=7)
diff --git a/llama-index-integrations/readers/llama-index-readers-reddit/README.md b/llama-index-integrations/readers/llama-index-readers-reddit/README.md
index 7153d344f6..ea964afc45 100644
--- a/llama-index-integrations/readers/llama-index-readers-reddit/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-reddit/README.md
@@ -1,5 +1,9 @@
 # Reddit Reader
 
+```bash
+pip install llama-index-readers-reddit
+```
+
 For any subreddit(s) you're interested in, search for relevant posts using keyword(s) and load the resulting text in the post and and top-level comments into LLMs/ LangChains.
 
 ## Get your Reddit credentials ready
@@ -15,9 +19,9 @@ For any subreddit(s) you're interested in, search for relevant posts using keywo
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
 
-RedditReader = download_loader("RedditReader")
+from llama_index.readers.reddit import RedditReader
 
 subreddits = ["MachineLearning"]
 search_keys = ["PyTorch", "deploy"]
@@ -35,13 +39,13 @@ index.query("What are the pain points of PyTorch users?")
 ### LangChain
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex, download_loader
 
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-RedditReader = download_loader("RedditReader")
+from llama_index.readers.reddit import RedditReader
 
 subreddits = ["MachineLearning"]
 search_keys = ["PyTorch", "deploy"]
diff --git a/llama-index-integrations/readers/llama-index-readers-remote-depth/README.md b/llama-index-integrations/readers/llama-index-readers-remote-depth/README.md
index 48ba8b0ab8..886acec8dd 100644
--- a/llama-index-integrations/readers/llama-index-readers-remote-depth/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-remote-depth/README.md
@@ -1,5 +1,9 @@
 # Remote Page/File Loader
 
+```bash
+pip install llama-index-readers-remote-depth
+```
+
 This loader makes it easy to extract the text from the links available in a webpage URL, and extract the links presents in the page. It's based on `RemoteReader` (reading single page), that is based on `SimpleDirectoryReader` (parsing the document if file is a pdf, etc). It is an all-in-one tool for (almost) any group of urls.
 
 You can try with this MIT lecture link, it will be able to extract the syllabus, the PDFs, etc:
@@ -10,9 +14,7 @@ You can try with this MIT lecture link, it will be able to extract the syllabus,
 You need to specify the parameter `depth` to specify how many levels of links you want to extract. For example, if you want to extract the links in the page, and the links in the links in the page, you need to specify `depth=2`.
 
 ```python
-from llama_index import download_loader
-
-RemoteDepthReader = download_loader("RemoteDepthReader")
+from llama_index.readers.remote_depth import RemoteDepthReader
 
 loader = RemoteDepthReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-remote/README.md b/llama-index-integrations/readers/llama-index-readers-remote/README.md
index f9ff15c271..c4f8e64a1c 100644
--- a/llama-index-integrations/readers/llama-index-readers-remote/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-remote/README.md
@@ -1,5 +1,9 @@
 # Remote Page/File Loader
 
+```bash
+pip install llama-index-readers-remote
+```
+
 This loader makes it easy to extract the text from any remote page or file using just its url. If there's a file at the url, this loader will download it temporarily and parse it using `SimpleDirectoryReader`. It is an all-in-one tool for (almost) any url.
 
 As a result, any page or type of file is supported. For instance, if a `.txt` url such as a [Project Gutenberg book](https://www.gutenberg.org/cache/epub/69994/pg69994.txt) is passed in, the text will be parsed as is. On the other hand, if a hosted .mp3 url is passed in, it will be downloaded and parsed using `AudioTranscriber`.
@@ -9,9 +13,7 @@ As a result, any page or type of file is supported. For instance, if a `.txt` ur
 To use this loader, you need to pass in a `Path` to a local file. Optionally, you may specify a `file_extractor` for the `SimpleDirectoryReader` to use, other than the default one.
 
 ```python
-from llama_index import download_loader
-
-RemoteReader = download_loader("RemoteReader")
+from llama_index.readers.remote import RemoteReader
 
 loader = RemoteReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-s3/README.md b/llama-index-integrations/readers/llama-index-readers-s3/README.md
index f4413b0f5a..96c3ed4d95 100644
--- a/llama-index-integrations/readers/llama-index-readers-s3/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-s3/README.md
@@ -11,10 +11,6 @@ To use this loader, you need to pass in the name of your S3 Bucket. After that,
 Otherwise, you may specify a prefix if you only want to parse certain files in the Bucket, or a subdirectory. AWS Access Key credentials may either be passed in during initialization or stored locally (see above).
 
 ```python
-from llama_index import download_loader
-
-S3Reader = download_loader("S3Reader")
-
 loader = S3Reader(
     bucket="scrabble-dictionary",
     key="dictionary.txt",
diff --git a/llama-index-integrations/readers/llama-index-readers-sec-filings/README.md b/llama-index-integrations/readers/llama-index-readers-sec-filings/README.md
index 15d0155f31..4a2e0a33ab 100644
--- a/llama-index-integrations/readers/llama-index-readers-sec-filings/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-sec-filings/README.md
@@ -1,5 +1,9 @@
 # SEC DATA DOWNLOADER
 
+```bash
+pip install llama-index-readers-sec-filings
+```
+
 Please checkout this repo that I am building on SEC Question Answering Agent [SEC-QA](https://github.com/Athe-kunal/SEC-QA-Agent)
 
 This repository downloads all the texts from SEC documents (10-K and 10-Q). Currently, it is not supporting documents that are amended, but that will be added in the near futures.
@@ -21,9 +25,7 @@ The SEC Downloader expects 5 attributes
 ## Usage
 
 ```python
-from llama_index import download_loader
-
-SECFilingsLoader = download_loader("SECFilingsLoader")
+from llama_index.readers.sec_filings import SECFilingsLoader
 
 loader = SECFilingsLoader(tickers=["TSLA"], amount=3, filing_type="10-K")
 loader.load_data()
@@ -95,10 +97,10 @@ This loader is can be used with both Langchain and LlamaIndex.
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
-from llama_index import SimpleDirectoryReader
+from llama_index.core import VectorStoreIndex, download_loader
+from llama_index.core import SimpleDirectoryReader
 
-SECFilingsLoader = download_loader("SECFilingsLoader")
+from llama_index.readers.sec_filings import SECFilingsLoader
 
 loader = SECFilingsLoader(tickers=["TSLA"], amount=3, filing_type="10-K")
 loader.load_data()
@@ -111,13 +113,12 @@ index.query("What are the risk factors of Tesla for the year 2022?")
 ### Langchain
 
 ```python
-from llama_index import download_loader
 from langchain.llms import OpenAI
 from langchain.chains import RetrievalQA
 from langchain.document_loaders import DirectoryLoader
 from langchain.indexes import VectorstoreIndexCreator
 
-SECFilingsLoader = download_loader("SECFilingsLoader")
+from llama_index.readers.sec_filings import SECFilingsLoader
 
 loader = SECFilingsLoader(tickers=["TSLA"], amount=3, filing_type="10-K")
 loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-semanticscholar/README.md b/llama-index-integrations/readers/llama-index-readers-semanticscholar/README.md
index 0242f63cd6..08b6a486f6 100644
--- a/llama-index-integrations/readers/llama-index-readers-semanticscholar/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-semanticscholar/README.md
@@ -1,5 +1,11 @@
 # Semantic Scholar Loader
 
+```bash
+pip install llama-index-readers-semanticscholar
+
+pip install llama-index-llms-openai
+```
+
 Welcome to Semantic Scholar Loader. This module serves as a crucial utility for researchers and professionals looking to get scholarly articles and publications from the Semantic Scholar database.
 
 For any research topic you are interested in, this loader reads relevant papers from a search result in Semantic Scholar into `Documents`.
@@ -27,13 +33,10 @@ Here is an example of how to use this loader in `llama_index` and get citations
 ### LlamaIndex
 
 ```python
-from llama_index.llms import OpenAI
-from llama_index.query_engine import CitationQueryEngine
-from llama_index import (
-    VectorStoreIndex,
-    ServiceContext,
-)
-from llama_hub.semanticscholar import SemanticScholarReader
+from llama_index.llms.openai import OpenAI
+from llama_index.core.query_engine import CitationQueryEngine
+from llama_index.core import VectorStoreIndex, ServiceContext
+from llama_index.readers.semanticscholar import SemanticScholarReader
 
 s2reader = SemanticScholarReader()
 
diff --git a/llama-index-integrations/readers/llama-index-readers-singlestore/README.md b/llama-index-integrations/readers/llama-index-readers-singlestore/README.md
index 5ab20d4d6c..1b98a72969 100644
--- a/llama-index-integrations/readers/llama-index-readers-singlestore/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-singlestore/README.md
@@ -1,5 +1,9 @@
 # SingleStore Loader
 
+```bash
+pip install llama-index-readers-singlestore
+```
+
 The SingleStore Loader retrieves a set of documents from a specified table in a SingleStore database. The user initializes the loader with database information and then provides a search embedding for retrieving similar documents.
 
 ## Usage
@@ -7,7 +11,7 @@ The SingleStore Loader retrieves a set of documents from a specified table in a
 Here's an example usage of the SingleStoreReader:
 
 ```python
-from llama_hub.singlestore import SingleStoreReader
+from llama_index.readers.singlestore import SingleStoreReader
 
 # Initialize the reader with your SingleStore database credentials and other relevant details
 reader = SingleStoreReader(
diff --git a/llama-index-integrations/readers/llama-index-readers-smart-pdf-loader/README.md b/llama-index-integrations/readers/llama-index-readers-smart-pdf-loader/README.md
index ceccf5377e..1184ce086a 100644
--- a/llama-index-integrations/readers/llama-index-readers-smart-pdf-loader/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-smart-pdf-loader/README.md
@@ -1,5 +1,9 @@
 # Smart PDF Loader
 
+```bash
+pip install llama-index-readers-smart-pdf-loader
+```
+
 SmartPDFLoader is a super fast PDF reader that understands the layout structure of PDFs such as nested sections, nested lists, paragraphs and tables.
 It uses layout information to smartly chunk PDFs into optimal short contexts for LLMs.
 
@@ -16,7 +20,7 @@ pip install llmsherpa
 Here's an example usage of the SmartPDFLoader:
 
 ```python
-from llama_hub.smart_pdf_loader import SmartPDFLoader
+from llama_index.readers.smart_pdf_loader import SmartPDFLoader
 
 llmsherpa_api_url = "https://readers.llmsherpa.com/api/document/developer/parseDocument?renderFormat=all"
 pdf_url = "https://arxiv.org/pdf/1910.13461.pdf"  # also allowed is a file path e.g. /home/downloads/xyz.pdf
@@ -27,7 +31,7 @@ documents = pdf_loader.load_data(pdf_url)
 Now you can use the documents with other LlamaIndex components. For example, for retrieval augmented generation, try this:
 
 ```python
-from llama_index import VectorStoreIndex
+from llama_index.core import VectorStoreIndex
 
 index = VectorStoreIndex.from_documents(documents)
 query_engine = index.as_query_engine()
diff --git a/llama-index-integrations/readers/llama-index-readers-snowflake/README.md b/llama-index-integrations/readers/llama-index-readers-snowflake/README.md
index d0302f8343..c6c0daec2c 100644
--- a/llama-index-integrations/readers/llama-index-readers-snowflake/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-snowflake/README.md
@@ -1,5 +1,9 @@
 # Snowflake Loader
 
+```bash
+pip install llama-index-readers-snowflake
+```
+
 This loader connects to Snowflake (using SQLAlchemy under the hood). The user specifies a query and extracts Document objects corresponding to the results. You can use this loader to easily connect to a database on Snowflake and pass the documents into a `GPTSQLStructStoreIndex` from LlamaIndex.
 
 ## Usage
@@ -9,9 +13,7 @@ This loader connects to Snowflake (using SQLAlchemy under the hood). The user sp
 Here's an example usage of the SnowflakeReader.
 
 ```python
-from llama_index import download_loader
-
-SnowflakeReader = download_loader("SnowflakeReader")
+from llama_index.readers.snowflake import SnowflakeReader
 
 reader = SnowflakeReader(
     engine=your_sqlalchemy_engine,
@@ -27,9 +29,7 @@ documents = reader.load_data(query=query)
 Here's an example usage of the SnowflakeReader.
 
 ```python
-from llama_index import download_loader
-
-SnowflakeReader = download_loader("SnowflakeReader")
+from llama_index.readers.snowflake import SnowflakeReader
 
 reader = SnowflakeReader(
     account="your_account",
diff --git a/llama-index-integrations/readers/llama-index-readers-snscrape-twitter/README.md b/llama-index-integrations/readers/llama-index-readers-snscrape-twitter/README.md
index 6811555148..e6d7b0f9ae 100644
--- a/llama-index-integrations/readers/llama-index-readers-snscrape-twitter/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-snscrape-twitter/README.md
@@ -1,5 +1,9 @@
 # Snscrape twitter Loader
 
+```bash
+pip install llama-index-readers-snscrape-twitter
+```
+
 This loader loads documents from Twitter using the Snscrape Python package.
 
 ## Usage
@@ -7,10 +11,9 @@ This loader loads documents from Twitter using the Snscrape Python package.
 Here's an example usage of the SnscrapeReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-SnscrapeReader = download_loader("SnscrapeTwitterReader")
+from llama_index.readers.snscrape_twitter import SnscrapeTwitterReader
 
 loader = SnscrapeReader()
 documents = loader.load_data(username="elonmusk", num_tweets=10)
diff --git a/llama-index-integrations/readers/llama-index-readers-spotify/README.md b/llama-index-integrations/readers/llama-index-readers-spotify/README.md
index 502d91d282..59cc209d62 100644
--- a/llama-index-integrations/readers/llama-index-readers-spotify/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-spotify/README.md
@@ -1,5 +1,9 @@
 # Spotify Loader
 
+```bash
+pip install llama-index-readers-spotify
+```
+
 This loader reads your Spotify account and loads saved albums, tracks, or playlists into `Documents`.
 
 As a prerequisite, you will need to register with [Spotify for Developers](https://developer.spotify.com) and create an app in order to get a `client_id` and a `client_secret`. You should then set a `redirect_uri` for the app (in the web dashboard under app settings). The `redirect_uri` does not need to be functional. You should then set the `client_id`, `client_secret`, and `redirect_uri` as environmental variables.
@@ -13,9 +17,7 @@ As a prerequisite, you will need to register with [Spotify for Developers](https
 Here's an example usage of the SpotifyReader. It will retrieve your saved albums, unless an optional `collection` argument is passed. Acceptable arguments are "albums", "tracks", and "playlists".
 
 ```python
-from llama_index import download_loader
-
-SpotifyReader = download_loader("SpotifyReader")
+from llama_index.readers.spotify import SpotifyReader
 
 loader = SpotifyReader()
 documents = loader.load_data()
@@ -28,9 +30,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-SpotifyReader = download_loader("SpotifyReader")
+from llama_index.readers.spotify import SpotifyReader
 
 loader = SpotifyReader()
 documents = loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-stripe-docs/README.md b/llama-index-integrations/readers/llama-index-readers-stripe-docs/README.md
index 33d669b7ae..15dddba1fa 100644
--- a/llama-index-integrations/readers/llama-index-readers-stripe-docs/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-stripe-docs/README.md
@@ -1,5 +1,9 @@
 # StripeDocs Loader
 
+```bash
+pip install llama-index-readers-stripe-docs
+```
+
 This loader asynchronously loads data from the [Stripe documentation](https://stripe.com/docs). It iterates through the Stripe sitemap to get all `/docs` references.
 
 It is based on the [Async Website Loader](https://llamahub.ai/l/web-async_web).
@@ -7,8 +11,8 @@ It is based on the [Async Website Loader](https://llamahub.ai/l/web-async_web).
 ## Usage
 
 ```python
-from llama_index import VectorStoreIndex
-from llama_hub.stripe_docs import StripeDocsReader
+from llama_index.core import VectorStoreIndex
+from llama_index.readers.stripe_docs import StripeDocsReader
 
 loader = StripeDocsReader()
 documents = loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-telegram/README.md b/llama-index-integrations/readers/llama-index-readers-telegram/README.md
index 0ee82e3344..6b05fdc064 100644
--- a/llama-index-integrations/readers/llama-index-readers-telegram/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-telegram/README.md
@@ -1,5 +1,9 @@
 # Telegram Loader
 
+```bash
+pip install llama-index-readers-telegram
+```
+
 This loader fetches posts/chat messages/comments from Telegram channels or chats into `Document`s.
 
 Before working with Telegram’s API, you need to get your own API ID and hash:
@@ -31,9 +35,8 @@ If the `.session` file already existed, it will not login again, so be aware of
 To use this loader, you simply need to pass in a entity name.
 
 ```python
-from llama_index.core import download_loader
+from llama_index.readers.telegram import TelegramReader
 
-TelegramReader = download_loader("TelegramReader")
 loader = TelegramReader(
     session_name="[YOUR_SESSION_NAME]",
     api_id="[YOUR_API_ID]",
diff --git a/llama-index-integrations/readers/llama-index-readers-trello/README.md b/llama-index-integrations/readers/llama-index-readers-trello/README.md
index 787e53605b..aaf2fe018b 100644
--- a/llama-index-integrations/readers/llama-index-readers-trello/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-trello/README.md
@@ -1,5 +1,9 @@
 # Trello Loader
 
+```bash
+pip install llama-index-readers-trello
+```
+
 This loader loads documents from Trello. The user specifies an API key and API token to initialize the TrelloReader. They then specify a board_id to
 load in the corresponding Document objects representing Trello cards.
 
@@ -8,10 +12,9 @@ load in the corresponding Document objects representing Trello cards.
 Here's an example usage of the TrelloReader.
 
 ```python
-from llama_index import download_loader
 import os
 
-TrelloReader = download_loader("TrelloReader")
+from llama_index.readers.trello import TrelloReader
 
 reader = TrelloReader("<Trello_API_KEY>", "<Trello_API_TOKEN>")
 documents = reader.load_data(board_id="<BOARD_ID>")
diff --git a/llama-index-integrations/readers/llama-index-readers-weather/README.md b/llama-index-integrations/readers/llama-index-readers-weather/README.md
index f20eb54baa..93de04fc5c 100644
--- a/llama-index-integrations/readers/llama-index-readers-weather/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-weather/README.md
@@ -1,5 +1,9 @@
 # Weather Loader
 
+```bash
+pip install llama-index-readers-weather
+```
+
 This loader fetches the weather data from the [OpenWeatherMap](https://openweathermap.org/api)'s OneCall API, using the `pyowm` Python package. You must initialize the loader with your OpenWeatherMap API token, and then pass in the names of the cities you want the weather data for.
 
 OWM's One Call API provides the following weather data for any geographical coordinate: - Current weather - Hourly forecast for 48 hours - Daily forecast for 7 days
@@ -9,9 +13,7 @@ OWM's One Call API provides the following weather data for any geographical coor
 To use this loader, you need to pass in an array of city names (eg. [chennai, chicago]). Pass in the country codes as well for better accuracy.
 
 ```python
-from llama_index import download_loader
-
-WeatherReader = download_loader("WeatherReader")
+from llama_index.readers.weather import WeatherReader
 
 loader = WeatherReader(token="[YOUR_TOKEN]")
 documents = loader.load_data(places=["Chennai, IN", "Dublin, IE"])
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/async_web/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/async_web/README.md
index 44c8985196..0706ae999e 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/async_web/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/async_web/README.md
@@ -1,5 +1,9 @@
 # Async Website Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is an asynchronous web scraper that fetches the text from static websites by converting the HTML to text.
 
 ## Usage
@@ -7,7 +11,7 @@ This loader is an asynchronous web scraper that fetches the text from static web
 To use this loader, you need to pass in an array of URLs.
 
 ```python
-from llama_index.readers.web.async_web.base import AsyncWebPageReader
+from llama_index.readers.web import AsyncWebPageReader
 
 # for jupyter notebooks uncomment the following two lines of code:
 # import nest_asyncio
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/beautiful_soup_web/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/beautiful_soup_web/README.md
index 331cd5fce7..f3506e6818 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/beautiful_soup_web/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/beautiful_soup_web/README.md
@@ -1,5 +1,9 @@
 # Beautiful Soup Website Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is a web scraper that fetches the text from websites using the `Beautiful Soup` (aka `bs4`) Python package. Furthermore, the flexibility of Beautiful Soup allows for custom templates that enable the loader to extract the desired text from specific website designs, such as Substack. Check out the code to see how to add your own.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader is a web scraper that fetches the text from websites using the `Beau
 To use this loader, you need to pass in an array of URLs.
 
 ```python
-from llama_index import download_loader
-
-BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
+from llama_index.readers.web import BeautifulSoupWebReader
 
 loader = BeautifulSoupWebReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -38,9 +40,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
+from llama_index.readers.web import BeautifulSoupWebReader
 
 loader = BeautifulSoupWebReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -53,12 +55,12 @@ index.query("What language is on this website?")
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
+from llama_index.readers.web import BeautifulSoupWebReader
 
 loader = BeautifulSoupWebReader()
 documents = loader.load_data(urls=["https://google.com"])
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/knowledge_base/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/knowledge_base/README.md
index 816e70f3e7..397380d648 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/knowledge_base/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/knowledge_base/README.md
@@ -1,5 +1,9 @@
 # Knowledge Base Website Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is a web crawler and scraper that fetches text content from websites hosting public knowledge bases. Examples are the [Intercom help center](https://www.intercom.com/help/en/) or the [Robinhood help center](https://robinhood.com/us/en/support/). Typically these sites have a directory structure with several sections and many articles in each section. This loader crawls and finds all links that match the article path provided, and scrapes the content of each article. This can be used to create bots that answer customer questions based on public documentation.
 
 It uses [Playwright](https://playwright.dev/python/) to drive a browser. This reduces the chance of getting blocked by Cloudflare or other CDNs, but makes it a bit more challenging to run on cloud services.
@@ -17,9 +21,7 @@ This installs the browsers that Playwright requires.
 To use this loader, you need to pass in the root URL and the string to search for in the URL to tell if the crawler has reached an article. You also need to pass in several CSS selectors so the cralwer knows which links to follow and which elements to extract content from. use
 
 ```python
-from llama_index import download_loader
-
-KnowledgeBaseWebReader = download_loader("KnowledgeBaseWebReader")
+from llama_index.readers.web import KnowledgeBaseWebReader
 
 loader = KnowledgeBaseWebReader()
 documents = loader.load_data(
@@ -39,9 +41,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-KnowledgeBaseWebReader = download_loader("KnowledgeBaseWebReader")
+from llama_index.readers.web import KnowledgeBaseWebReader
 
 loader = KnowledgeBaseWebReader()
 documents = loader.load_data(
@@ -61,12 +63,12 @@ index.query("What languages does Intercom support?")
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-KnowledgeBaseWebReader = download_loader("KnowledgeBaseWebReader")
+from llama_index.readers.web import KnowledgeBaseWebReader
 
 loader = KnowledgeBaseWebReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/main_content_extractor/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/main_content_extractor/README.md
index 6fb33b7b5e..1dea93a589 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/main_content_extractor/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/main_content_extractor/README.md
@@ -1,5 +1,9 @@
 # MainContentExtractor Website Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is a web scraper that fetches the text from static websites using the `MainContentExtractor` Python package.
 
 For information on how to extract main content, README in the following github repository
@@ -11,9 +15,7 @@ For information on how to extract main content, README in the following github r
 To use this loader, you need to pass in an array of URLs.
 
 ```python
-from llama_index import download_loader
-
-MainContentExtractorReader = download_loader("MainContentExtractorReader")
+from llama_index.readers.web import MainContentExtractorReader
 
 loader = MainContentExtractorReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -24,9 +26,9 @@ documents = loader.load_data(urls=["https://google.com"])
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-MainContentExtractorReader = download_loader("MainContentExtractorReader")
+from llama_index.readers.web import MainContentExtractorReader
 
 loader = MainContentExtractorReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -39,12 +41,12 @@ index.query("What language is on this website?")
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-MainContentExtractorReader = download_loader("MainContentExtractorReader")
+from llama_index.readers.web import MainContentExtractorReader
 
 loader = MainContentExtractorReader()
 documents = loader.load_data(urls=["https://google.com"])
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/news/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/news/README.md
index a20e912379..b56b7b8326 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/news/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/news/README.md
@@ -1,5 +1,9 @@
 # News Article Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader makes use of the `newspaper3k` library to parse web page urls which have news
 articles in them.
 
@@ -12,7 +16,7 @@ pip install newspaper3k
 Pass in an array of individual page URLs:
 
 ```python
-from llama_index.readers.web.news import NewsArticleReader
+from llama_index.readers.web import NewsArticleReader
 
 reader = NewsArticleReader(use_nlp=False)
 documents = reader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/readability_web/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/readability_web/README.md
index 75da465912..d41b4fe2d9 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/readability_web/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/readability_web/README.md
@@ -1,5 +1,9 @@
 # Readability Webpage Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 Extracting relevant information from a fully rendered web page.
 During the processing, it is always assumed that web pages used as data sources contain textual content.
 
@@ -13,9 +17,7 @@ It is particularly effective for websites that use client-side rendering.
 To use this loader, you need to pass in a single of URL.
 
 ```python
-from llama_index import download_loader
-
-ReadabilityWebPageReader = download_loader("ReadabilityWebPageReader")
+from llama_index.readers.web import ReadabilityWebPageReader
 
 # or set proxy server for playwright: loader = ReadabilityWebPageReader(proxy="http://your-proxy-server:port")
 # For some specific web pages, you may need to set "wait_until" to "networkidle". loader = ReadabilityWebPageReader(wait_until="networkidle")
@@ -33,9 +35,7 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import download_loader
-
-ReadabilityWebPageReader = download_loader("ReadabilityWebPageReader")
+from llama_index.readers.web import ReadabilityWebPageReader
 
 loader = ReadabilityWebPageReader()
 documents = loader.load_data(
@@ -51,12 +51,12 @@ print(index.query("What is pages?"))
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-ReadabilityWebPageReader = download_loader("ReadabilityWebPageReader")
+from llama_index.readers.web import ReadabilityWebPageReader
 
 loader = ReadabilityWebPageReader()
 documents = loader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss/README.md
index 4431bbb140..5e4e4e5d54 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss/README.md
@@ -1,5 +1,9 @@
 # RSS Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader allows fetching text from an RSS feed. It uses the `feedparser` module
 to fetch the feed and optionally the `html2text` module to sanitize it.
 
@@ -8,9 +12,7 @@ to fetch the feed and optionally the `html2text` module to sanitize it.
 To use this loader, pass in an array of URL's.
 
 ```python
-from llama_index import download_loader
-
-RssReader = download_loader("RssReader")
+from llama_index.readers.web import RssReader
 
 reader = RssReader()
 documents = reader.load_data(
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss_news/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss_news/README.md
index 7b6965399f..fb345385a3 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss_news/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/rss_news/README.md
@@ -9,7 +9,7 @@ To use this loader, pass in an array of URLs of RSS feeds. It will download the
 combine them:
 
 ```python
-from llama_index.readers.web.rss_news import RSSNewsReader
+from llama_index.readers.web import RSSNewsReader
 
 urls = [
     "https://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml",
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/simple_web/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/simple_web/README.md
index f14354eb20..b6f9d0ffa4 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/simple_web/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/simple_web/README.md
@@ -1,5 +1,9 @@
 # Simple Website Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is a simple web scraper that fetches the text from static websites by converting the HTML to text.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader is a simple web scraper that fetches the text from static websites b
 To use this loader, you need to pass in an array of URLs.
 
 ```python
-from llama_index import download_loader
-
-SimpleWebPageReader = download_loader("SimpleWebPageReader")
+from llama_index.readers.web import SimpleWebPageReader
 
 loader = SimpleWebPageReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -22,9 +24,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-SimpleWebPageReader = download_loader("SimpleWebPageReader")
+from llama_index.readers.web import SimpleWebPageReader
 
 loader = SimpleWebPageReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -37,12 +39,12 @@ index.query("What language is on this website?")
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-SimpleWebPageReader = download_loader("SimpleWebPageReader")
+from llama_index.readers.web import SimpleWebPageReader
 
 loader = SimpleWebPageReader()
 documents = loader.load_data(urls=["https://google.com"])
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/sitemap/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/sitemap/README.md
index 67066ecceb..b7b5f557fe 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/sitemap/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/sitemap/README.md
@@ -1,5 +1,9 @@
 # Sitemap Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is an asynchronous web scraper that fetches the text from static websites by using its sitemap and optionally converting the HTML to text.
 
 It is based on the [Async Website Loader](https://llama-hub-ui.vercel.app/l/web-async_web)
@@ -9,7 +13,7 @@ It is based on the [Async Website Loader](https://llama-hub-ui.vercel.app/l/web-
 To use this loader, you just declare the sitemap.xml url like this:
 
 ```python
-from llama_index.readers.web.sitemap import SitemapReader
+from llama_index.readers.web import SitemapReader
 
 # for jupyter notebooks uncomment the following two lines of code:
 # import nest_asyncio
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/trafilatura_web/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/trafilatura_web/README.md
index 2dc5b29dad..a75908bb1d 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/trafilatura_web/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/trafilatura_web/README.md
@@ -1,5 +1,9 @@
 # Trafilatura Website Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader is a web scraper that fetches the text from static websites using the `trafilatura` Python package.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader is a web scraper that fetches the text from static websites using th
 To use this loader, you need to pass in an array of URLs.
 
 ```python
-from llama_index import download_loader
-
-TrafilaturaWebReader = download_loader("TrafilaturaWebReader")
+from llama_index.readers.web import TrafilaturaWebReader
 
 loader = TrafilaturaWebReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -22,9 +24,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-TrafilaturaWebReader = download_loader("TrafilaturaWebReader")
+from llama_index.readers.web import TrafilaturaWebReader
 
 loader = TrafilaturaWebReader()
 documents = loader.load_data(urls=["https://google.com"])
@@ -37,12 +39,12 @@ index.query("What language is on this website?")
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-TrafilaturaWebReader = download_loader("TrafilaturaWebReader")
+from llama_index.readers.web import TrafilaturaWebReader
 
 loader = TrafilaturaWebReader()
 documents = loader.load_data(urls=["https://google.com"])
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/unstructured_web/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/unstructured_web/README.md
index de555a882e..671e1d915b 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/unstructured_web/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/unstructured_web/README.md
@@ -1,14 +1,16 @@
 # Unstructured.io URL Loader
 
+```bash
+pip install llama-index-readers-web
+```
+
 This loader extracts the text from URLs using [Unstructured.io](https://github.com/Unstructured-IO/unstructured). The partition_html function partitions an HTML document and returns a list
 of document Element objects.
 
 ## Usage
 
 ```python
-from llama_index import download_loader
-
-UnstructuredURLLoader = download_loader("UnstructuredURLLoader")
+from llama_index.readers.web import UnstructuredURLLoader
 
 urls = [
     "https://www.understandingwar.org/backgrounder/russian-offensive-campaign-assessment-february-8-2023",
diff --git a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/whole_site/README.md b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/whole_site/README.md
index 4a7f9268f9..7a758d4673 100644
--- a/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/whole_site/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/whole_site/README.md
@@ -1,5 +1,9 @@
 # WholeSiteReader
 
+```bash
+pip install llama-index-readers-web
+```
+
 The WholeSiteReader is a sophisticated web scraping tool that employs a breadth-first search (BFS) algorithm. It's designed to methodically traverse and extract content from entire websites, focusing specifically on predefined URL paths.
 
 ## Features
@@ -10,9 +14,8 @@ The WholeSiteReader is a sophisticated web scraping tool that employs a breadth-
 - **Selenium-Based:** Leverages Selenium for dynamic interaction with web pages, supporting JavaScript-rendered content.
 
 ```python
-from llama_index import download_loader
+from llama_index.readers.web import WholeSiteReader
 
-WholeSiteReader = download_loader("WholeSiteReader")
 # Initialize the scraper with a prefix URL and maximum depth
 scraper = WholeSiteReader(
     prefix="https://www.paulgraham.com/", max_depth=10  # Example prefix
@@ -31,9 +34,9 @@ This loader is designed to be used as a way to load data into [LlamaIndex](https
 ### LlamaIndex
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 
-WholeSiteReader = download_loader("WholeSiteReader")
+from llama_index.readers.web import WholeSiteReader
 
 # Initialize the scraper with a prefix URL and maximum depth
 scraper = WholeSiteReader(
@@ -54,12 +57,12 @@ index.query("What language is on this website?")
 Note: Make sure you change the description of the `Tool` to match your use-case.
 
 ```python
-from llama_index import VectorStoreIndex, download_loader
+from llama_index.core import VectorStoreIndex
 from langchain.agents import initialize_agent, Tool
 from langchain.llms import OpenAI
 from langchain.chains.conversation.memory import ConversationBufferMemory
 
-WholeSiteReader = download_loader("WholeSiteReader")
+from llama_index.readers.web import WholeSiteReader
 
 # Initialize the scraper with a prefix URL and maximum depth
 scraper = WholeSiteReader(
diff --git a/llama-index-integrations/readers/llama-index-readers-whatsapp/README.md b/llama-index-integrations/readers/llama-index-readers-whatsapp/README.md
index 062ddad7e4..7e02868f20 100644
--- a/llama-index-integrations/readers/llama-index-readers-whatsapp/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-whatsapp/README.md
@@ -1,5 +1,9 @@
 # Whatsapp chat loader
 
+```bash
+pip install llama-index-readers-whatsapp
+```
+
 ## Export a Whatsapp chat
 
 1. Open a chat
@@ -16,9 +20,8 @@ For more info see [Whatsapp's Help Center](https://faq.whatsapp.com/118041407917
 
 ```python
 from pathlib import Path
-from llama_index import download_loader
 
-WhatsappChatLoader = download_loader("WhatsappChatLoader")
+from llama_index.readers.whatsapp import WhatsappChatLoader
 
 path = "whatsapp.txt"
 loader = WhatsappChatLoader(path=path)
diff --git a/llama-index-integrations/readers/llama-index-readers-wordlift/README.md b/llama-index-integrations/readers/llama-index-readers-wordlift/README.md
index 38a2b9c3c7..ef3f79e7ac 100644
--- a/llama-index-integrations/readers/llama-index-readers-wordlift/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-wordlift/README.md
@@ -1,5 +1,9 @@
 # WordLift Reader
 
+```bash
+pip install llama-index-readers-wordlift
+```
+
 The WordLift GraphQL Reader is a connector to fetch and transform data from a WordLift Knowledge Graph using your the WordLift Key. The connector provides a convenient way to load data from WordLift using a GraphQL query and transform it into a list of documents for further processing.
 
 ## Usage
@@ -15,10 +19,10 @@ Here's an example of how to use the WordLift GraphQL Reader:
 
 ```python
 import json
-from llama_index import VectorStoreIndex
-from llama_index.readers.schema import Document
+from llama_index.core import VectorStoreIndex
+from llama_index.core import Document
 from langchain.llms import OpenAI
-from llama_hub.wordlift import WordLiftLoader
+from llama_index.readers.wordlift import WordLiftLoader
 
 # Set up the necessary configuration options
 endpoint = "https://api.wordlift.io/graphql"
diff --git a/llama-index-integrations/readers/llama-index-readers-wordpress/README.md b/llama-index-integrations/readers/llama-index-readers-wordpress/README.md
index e46aadebd7..7c57428410 100644
--- a/llama-index-integrations/readers/llama-index-readers-wordpress/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-wordpress/README.md
@@ -1,5 +1,9 @@
 # Wordpress Loader
 
+```bash
+pip install llama-index-readers-wordpress
+```
+
 This loader fetches the text from Wordpress blog posts using the Wordpress API. It also uses the BeautifulSoup library to parse the HTML and extract the text from the articles.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader fetches the text from Wordpress blog posts using the Wordpress API.
 To use this loader, you need to pass base url of the Wordpress installation (e.g. `https://www.mysite.com`), a username, and an application password for the user (more about application passwords [here](https://www.paidmembershipspro.com/create-application-password-wordpress/))
 
 ```python
-from llama_index import download_loader
-
-WordpressReader = download_loader("WordpressReader")
+from llama_index.readers.wordpress import WordpressReader
 
 loader = WordpressReader(
     url="https://www.mysite.com",
diff --git a/llama-index-integrations/readers/llama-index-readers-youtube-transcript/README.md b/llama-index-integrations/readers/llama-index-readers-youtube-transcript/README.md
index cbd593ac41..86333283ff 100644
--- a/llama-index-integrations/readers/llama-index-readers-youtube-transcript/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-youtube-transcript/README.md
@@ -1,5 +1,11 @@
 # Youtube Transcript Loader
 
+```bash
+pip install youtube_transcript_api
+
+pip install llama-index-readers-youtube-transcript
+```
+
 This loader fetches the text transcript of Youtube videos using the `youtube_transcript_api` Python package.
 
 ## Usage
@@ -9,7 +15,7 @@ To use this loader, you will need to first `pip install youtube_transcript_api`.
 Then, simply pass an array of YouTube links into `load_data`:
 
 ```python
-from llama_hub.youtube_transcript import YoutubeTranscriptReader
+from llama_index.readers.youtube_transcript import YoutubeTranscriptReader
 
 loader = YoutubeTranscriptReader()
 documents = loader.load_data(
@@ -22,10 +28,10 @@ Supported URL formats: + youtube.com/watch?v={video_id} (with or without 'www.')
 To programmatically check if a URL is supported:
 
 ```python
-from llama_hub.youtube_transcript import is_youtube_video
+from llama_index.readers.youtube_transcript.utils import is_youtube_video
 
 is_youtube_video("https://youtube.com/watch?v=j83jrh2")  # => True
 is_youtube_video("https://vimeo.com/272134160")  # => False
 ```
 
-This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent. See [here](https://github.com/emptycrown/llama-hub/tree/main) for examples.
+This loader is designed to be used as a way to load data into [LlamaIndex](https://github.com/run-llama/llama_index/tree/main/llama_index) and/or subsequently used as a Tool in a [LangChain](https://github.com/hwchase17/langchain) Agent.
diff --git a/llama-index-integrations/readers/llama-index-readers-zendesk/README.md b/llama-index-integrations/readers/llama-index-readers-zendesk/README.md
index 11aeec68fb..126da790a3 100644
--- a/llama-index-integrations/readers/llama-index-readers-zendesk/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-zendesk/README.md
@@ -1,5 +1,9 @@
 # Zendesk Loader
 
+```bash
+pip install llama-index-readers-zendesk
+```
+
 This loader fetches the text from Zendesk help articles using the Zendesk API. It also uses the BeautifulSoup library to parse the HTML and extract the text from the articles.
 
 ## Usage
@@ -7,9 +11,7 @@ This loader fetches the text from Zendesk help articles using the Zendesk API. I
 To use this loader, you need to pass in the subdomain of a Zendesk account. No authentication is required. You can also set the locale of articles as needed.
 
 ```python
-from llama_index import download_loader
-
-ZendeskReader = download_loader("ZendeskReader")
+from llama_index.readers.zendesk import ZendeskReader
 
 loader = ZendeskReader(zendesk_subdomain="my_subdomain", locale="en-us")
 documents = loader.load_data()
diff --git a/llama-index-integrations/readers/llama-index-readers-zep/README.md b/llama-index-integrations/readers/llama-index-readers-zep/README.md
index dae8826f38..5b940f9e85 100644
--- a/llama-index-integrations/readers/llama-index-readers-zep/README.md
+++ b/llama-index-integrations/readers/llama-index-readers-zep/README.md
@@ -1,5 +1,9 @@
 # Zep Reader
 
+```bash
+pip install llama-index-readers-zep
+```
+
 The Zep Reader returns a set of texts corresponding to a text query or embeddings retrieved from a Zep Collection.
 The Reader is initialized with a Zep API URL and optionally an API key. The Reader can then be used to load data
 from a Zep Document Collection.
@@ -23,14 +27,13 @@ results.
 import time
 from uuid import uuid4
 
-from llama_index.node_parser import SimpleNodeParser
-from llama_index.readers.schema import Document
+from llama_index.core.node_parser import SimpleNodeParser
+from llama_index.core import Document
 from zep_python import ZepClient
 from zep_python.document import Document as ZepDocument
 
-from llama_index import download_loader
 
-ZepReader = download_loader("ZepReader")
+from llama_index.readers.zep import ZepReader
 
 # Create a Zep collection
 zep_api_url = "http://localhost:8000"  # replace with your Zep API URL
diff --git a/llama-index-integrations/tools/llama-index-tools-arxiv/README.md b/llama-index-integrations/tools/llama-index-tools-arxiv/README.md
index 7faed436c7..32b4f0b0c7 100644
--- a/llama-index-integrations/tools/llama-index-tools-arxiv/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-arxiv/README.md
@@ -10,7 +10,7 @@ Here's an example usage of the ArxivToolSpec.
 
 ```python
 from llama_index.tools.arxiv import ArxivToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = ArxivToolSpec()
 
diff --git a/llama-index-integrations/tools/llama-index-tools-azure-cv/README.md b/llama-index-integrations/tools/llama-index-tools-azure-cv/README.md
index d63a39a202..4005de47f5 100644
--- a/llama-index-integrations/tools/llama-index-tools-azure-cv/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-azure-cv/README.md
@@ -12,7 +12,7 @@ Here's an example usage of the AzureCVToolSpec.
 
 ```python
 from llama_index.tools.azure_cv import AzureCVToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = AzureCVToolSpec(api_key="your-key", resource="your-resource")
 
diff --git a/llama-index-integrations/tools/llama-index-tools-azure-speech/README.md b/llama-index-integrations/tools/llama-index-tools-azure-speech/README.md
index 227278529a..d0e573a223 100644
--- a/llama-index-integrations/tools/llama-index-tools-azure-speech/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-azure-speech/README.md
@@ -8,7 +8,7 @@ This tool has a more extensive example usage documented in a Jupyter notebook [h
 
 ```python
 from llama_index.tools.azure_speech import AzureSpeechToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 speech_tool = AzureSpeechToolSpec(speech_key="your-key", region="eastus")
 
diff --git a/llama-index-integrations/tools/llama-index-tools-azure-translate/README.md b/llama-index-integrations/tools/llama-index-tools-azure-translate/README.md
index 69124ea986..ce5554c1b8 100644
--- a/llama-index-integrations/tools/llama-index-tools-azure-translate/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-azure-translate/README.md
@@ -13,7 +13,7 @@ This tool has a more extensive example usage documented in a Jupyter notebook [h
 Here's an example usage of the AzureTranslateToolSpec.
 
 ```python
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 from llama_index.tools.azure_translate import AzureTranslateToolSpec
 
 translate_tool = AzureTranslateToolSpec(api_key="your-key", region="eastus")
diff --git a/llama-index-integrations/tools/llama-index-tools-bing-search/README.md b/llama-index-integrations/tools/llama-index-tools-bing-search/README.md
index 762c18eecd..d23c9f8773 100644
--- a/llama-index-integrations/tools/llama-index-tools-bing-search/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-bing-search/README.md
@@ -12,7 +12,7 @@ Here's an example usage of the BingSearchToolSpec.
 
 ```python
 from llama_index.tools.bing_search import BingSearchToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = BingSearchToolSpec(api_key="your-key")
 
diff --git a/llama-index-integrations/tools/llama-index-tools-chatgpt-plugin/README.md b/llama-index-integrations/tools/llama-index-tools-chatgpt-plugin/README.md
index 6e60772180..e17ade24dd 100644
--- a/llama-index-integrations/tools/llama-index-tools-chatgpt-plugin/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-chatgpt-plugin/README.md
@@ -17,7 +17,7 @@ f = requests.get(
 manifest = yaml.safe_load(f)
 
 from llama_index.tools.chatgpt_plugin import ChatGPTPluginToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 from llama_index.tools.requests import RequestsToolSpec
 
 requests_spec = RequestsToolSpec()
diff --git a/llama-index-integrations/tools/llama-index-tools-code-interpreter/README.md b/llama-index-integrations/tools/llama-index-tools-code-interpreter/README.md
index 879fb398d9..82bf5353c6 100644
--- a/llama-index-integrations/tools/llama-index-tools-code-interpreter/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-code-interpreter/README.md
@@ -12,7 +12,7 @@ Here's an example usage of the CodeInterpreterToolSpec.
 
 ```python
 from llama_index.tools.code_interpreter import CodeInterpreterToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 code_spec = CodeInterpreterToolSpec()
 
diff --git a/llama-index-integrations/tools/llama-index-tools-cogniswitch/README.md b/llama-index-integrations/tools/llama-index-tools-cogniswitch/README.md
index deb89d56e1..e90f35f43c 100644
--- a/llama-index-integrations/tools/llama-index-tools-cogniswitch/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-cogniswitch/README.md
@@ -36,7 +36,7 @@ import warnings
 warnings.filterwarnings("ignore")
 import os
 from llama_index.tools.cogniswitch import CogniswitchToolSpec
-from llama_index.agent import ReActAgent
+from llama_index.core.agent import ReActAgent
 ```
 
 ### Cogniswitch Credentials and OpenAI token
diff --git a/llama-index-integrations/tools/llama-index-tools-database/README.md b/llama-index-integrations/tools/llama-index-tools-database/README.md
index 28d91a29c4..e7e7e84257 100644
--- a/llama-index-integrations/tools/llama-index-tools-database/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-database/README.md
@@ -10,7 +10,7 @@ Here's an example usage of the DatabaseToolSpec.
 
 ```python
 from llama_index.tools.database import DatabaseToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 db_tools = DatabaseToolSpec(
     scheme="postgresql",  # Database Scheme
diff --git a/llama-index-integrations/tools/llama-index-tools-duckduckgo/README.md b/llama-index-integrations/tools/llama-index-tools-duckduckgo/README.md
index 81cea3fd91..2eabbc8288 100644
--- a/llama-index-integrations/tools/llama-index-tools-duckduckgo/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-duckduckgo/README.md
@@ -10,7 +10,7 @@ Here's an example usage of the DuckDuckGoSearchToolSpec.
 
 ```python
 from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = DuckDuckGoSearchToolSpec()
 
diff --git a/llama-index-integrations/tools/llama-index-tools-exa/README.md b/llama-index-integrations/tools/llama-index-tools-exa/README.md
index c1bbf5d785..0bd2e13572 100644
--- a/llama-index-integrations/tools/llama-index-tools-exa/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-exa/README.md
@@ -13,9 +13,7 @@ Here's an example usage of the ExaToolSpec.
 
 ```python
 from llama_index.tools.exa import ExaToolSpec
-from llama_index.agent.openai import (
-    OpenAIAgent,
-)  # requires llama-index-agent-openai
+from llama_index.agent.openai import OpenAIAgent
 
 exa_tool = ExaToolSpec(
     api_key="your-key",
diff --git a/llama-index-integrations/tools/llama-index-tools-graphql/README.md b/llama-index-integrations/tools/llama-index-tools-graphql/README.md
index 669e3638ff..7924ba3637 100644
--- a/llama-index-integrations/tools/llama-index-tools-graphql/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-graphql/README.md
@@ -12,7 +12,7 @@ This tool works best when the Agent has access to the GraphQL schema for the ser
 
 ```python
 from llama_index.tools.graphql import GraphQLToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = GraphQLToolSpec(
     url="https://spacex-production.up.railway.app/",
diff --git a/llama-index-integrations/tools/llama-index-tools-ionic-shopping/README.md b/llama-index-integrations/tools/llama-index-tools-ionic-shopping/README.md
index c4cf64e448..57670a7655 100644
--- a/llama-index-integrations/tools/llama-index-tools-ionic-shopping/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-ionic-shopping/README.md
@@ -1,5 +1,9 @@
 # LlamaIndex Tools Integration: Ionic Shopping
 
+```bash
+pip install llama-index-tools-ionic-shopping
+```
+
 [Ionic](https://ioniccommerce.com) is a plug and play ecommerce marketplace for AI Assistants.
 By including the Ionic Tool in your agent, you are effortlessly providing your users with the ability
 to shop and transact directly within your agent, and you’ll get a cut of the transaction.
@@ -10,7 +14,7 @@ Learn more about how [Ionic attributes sales](https://docs.ioniccommerce.com/gu
 to your agent. Provide your Ionic API Key when instantiating the tool:
 
 ```python
-from llama_hub.tools.ionic_shopping.base import IonicShoppingToolSpec
+from llama_index.tools.ionic_shopping import IonicShoppingToolSpec
 
 ionic_tool = IonicShoppingToolSpec(api_key="<my Ionic API Key>").to_tool_list()
 ```
@@ -21,8 +25,10 @@ Try it out using the [Jupyter notebook](https://github.com/run-llama/llama-hub/b
 
 ```python
 import openai
-from llama_index.agent import OpenAIAgent  # requires llama-index-agent-openai
-from llama_hub.tools.ionic_shopping.base import IonicShoppingToolSpec
+from llama_index.agent.openai import (
+    OpenAIAgent,
+)  # requires llama-index-agent-openai
+from llama_index.tools.ionic_shopping import IonicShoppingToolSpec
 
 openai.api_key = "sk-api-key"
 
diff --git a/llama-index-integrations/tools/llama-index-tools-metaphor/README.md b/llama-index-integrations/tools/llama-index-tools-metaphor/README.md
index 067f5964ae..21f0d0514d 100644
--- a/llama-index-integrations/tools/llama-index-tools-metaphor/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-metaphor/README.md
@@ -19,7 +19,7 @@ Here's an example usage of the MetaphorToolSpec.
 
 ```python
 from llama_index.tools.metaphor import MetaphorToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 metaphor_tool = MetaphorToolSpec(
     api_key="your-key",
diff --git a/llama-index-integrations/tools/llama-index-tools-multion/README.md b/llama-index-integrations/tools/llama-index-tools-multion/README.md
index 9788c99f3c..5eb221b6d3 100644
--- a/llama-index-integrations/tools/llama-index-tools-multion/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-multion/README.md
@@ -1,5 +1,9 @@
 # MultiOn Tool
 
+```bash
+pip install llama-index-tools-multion
+```
+
 This tool connects to [MultiOn](https://www.multion.ai/) to enable your agent to easily
 connect to the internet through your Chrome Web browser and act on your behalf
 
@@ -13,8 +17,8 @@ This tool has a more extensive example usage documented in a Jupyter notebook [h
 Here's an example usage of the MultionToolSpec.
 
 ```python
-from llama_index.tools.metaphor import MultionToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.tools.multion import MultionToolSpec
+from llama_index.agent.openai import OpenAIAgent
 
 multion_tool = MultionToolSpec()
 
diff --git a/llama-index-integrations/tools/llama-index-tools-neo4j/README.md b/llama-index-integrations/tools/llama-index-tools-neo4j/README.md
index 1f30d99f8b..9ae5a9c10b 100644
--- a/llama-index-integrations/tools/llama-index-tools-neo4j/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-neo4j/README.md
@@ -1,5 +1,9 @@
 # Neo4j Schema Query Builder
 
+```bash
+pip install llama-index-tools-neo4j
+```
+
 The `Neo4jQueryToolSpec` class provides a way to query a Neo4j graph database based on a provided schema definition. The class uses a language model to generate Cypher queries from user questions and has the capability to recover from Cypher syntax errors through a self-healing mechanism.
 
 ## Table of Contents
@@ -16,9 +20,9 @@ The `Neo4jQueryToolSpec` class provides a way to query a Neo4j graph database ba
 Initialize the `Neo4jQueryToolSpec` class with:
 
 ```python
-from llama_index.tools.neo4j_db import Neo4jQueryToolSpec
-from llama_index.llms import OpenAI
-from llama_index.agent import OpenAIAgent
+from llama_index.tools.neo4j import Neo4jQueryToolSpec
+from llama_index.llms.openai import OpenAI
+from llama_index.agent.openai import OpenAIAgent
 
 llm = OpenAI(model="gpt-4", openai_api_key="XXXX-XXXX", temperature=0)
 
diff --git a/llama-index-integrations/tools/llama-index-tools-notion/README.md b/llama-index-integrations/tools/llama-index-tools-notion/README.md
index 9b57466935..6945e6ad9e 100644
--- a/llama-index-integrations/tools/llama-index-tools-notion/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-notion/README.md
@@ -10,7 +10,7 @@ Here's an example usage of the NotionToolSpec.
 
 ```python
 from llama_index.tools.notion import NotionToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = NotionToolSpec()
 
diff --git a/llama-index-integrations/tools/llama-index-tools-openai/README.md b/llama-index-integrations/tools/llama-index-tools-openai/README.md
index f91e6ad6d7..fb1c3367fc 100644
--- a/llama-index-integrations/tools/llama-index-tools-openai/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-openai/README.md
@@ -9,9 +9,7 @@ This tool has a more extensive example usage documented in a Jupyter notebook [h
 ### Usage with Agent
 
 ```python
-from llama_index.tools.openai.image_generation import (
-    OpenAIImageGenerationToolSpec,
-)
+from llama_index.tools.openai import OpenAIImageGenerationToolSpec
 
 image_generation_tool = OpenAIImageGenerationToolSpec(
     api_key=os.environ["OPENAI_API_KEY"]
@@ -32,9 +30,7 @@ print(response)
 ### Usage directly
 
 ```python
-from llama_index.tools.openai.image_generation import (
-    OpenAIImageGenerationToolSpec,
-)
+from llama_index.tools.openai import OpenAIImageGenerationToolSpec
 
 image_generation_tool = OpenAIImageGenerationToolSpec(
     api_key=os.environ["OPENAI_API_KEY"]
diff --git a/llama-index-integrations/tools/llama-index-tools-openapi/README.md b/llama-index-integrations/tools/llama-index-tools-openapi/README.md
index c010ad25e1..58bda6d03d 100644
--- a/llama-index-integrations/tools/llama-index-tools-openapi/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-openapi/README.md
@@ -1,5 +1,9 @@
 # OpenAPI Tool
 
+```bash
+pip install llama-index-tools-openapi
+```
+
 This tool loads an OpenAPI spec and allow the Agent to retrieve endpoints and details about endpoints. The RequestsToolSpec can also be loaded into the agent to allow the agent to hit the necessary endpoints with a REST request.
 
 ## Usage
@@ -9,8 +13,8 @@ This tool has more extensive example usage documented in a Jupyter notebook [her
 Here's an example usage of the OpenAPIToolSpec.
 
 ```python
-from llama_hub.tools.openapi import OpenAPIToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.tools.openapi import OpenAPIToolSpec
+from llama_index.agent.openai import OpenAIAgent
 
 f = requests.get(
     "https://raw.githubusercontent.com/APIs-guru/openapi-directory/main/APIs/openai.com/1.2.0/openapi.yaml"
diff --git a/llama-index-integrations/tools/llama-index-tools-playgrounds/README.md b/llama-index-integrations/tools/llama-index-tools-playgrounds/README.md
index 084401a3da..10e41c325a 100644
--- a/llama-index-integrations/tools/llama-index-tools-playgrounds/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-playgrounds/README.md
@@ -2,6 +2,10 @@
 
 ## playgrounds_subgraph_connector
 
+```bash
+pip install llama-index-tools-playgrounds
+```
+
 Playgrounds API is a service provided by [Playgrounds Analytics](https://playgrounds.network) to streamline interfacing with decentralized subgraphs (indexed blockchain datasets).
 
 The `PlaygroundsSubgraphConnector` is a tool designed for LLM agents to seamlessly interface with and query subgraphs on The Graph's decentralized network via Playgrounds API.
@@ -23,10 +27,8 @@ To utilize the tool, simply initialize it with the appropriate `identifier` (Sub
 
 ```python
 import openai
-from llama_index.agent import OpenAIAgent
-from llama_index.tools.playgrounds_subgraph_connector import (
-    PlaygroundsSubgraphConnectorToolSpec,
-)
+from llama_index.agent.openai import OpenAIAgent
+from llama_index.tools.playgrounds import PlaygroundsSubgraphConnectorToolSpec
 
 
 def simple_test():
@@ -85,10 +87,8 @@ To utilize the tool, initialize it with the appropriate `identifier` (Subgraph I
 
 ```python
 import openai
-from llama_index.agent import OpenAIAgent
-from llama_index.tools.playgrounds_subgraph_inspector import (
-    PlaygroundsSubgraphInspectorToolSpec,
-)
+from llama_index.agent.openai import OpenAIAgent
+from llama_index.tools.playgrounds import PlaygroundsSubgraphInspectorToolSpec
 
 
 def inspect_subgraph(
diff --git a/llama-index-integrations/tools/llama-index-tools-python-file/README.md b/llama-index-integrations/tools/llama-index-tools-python-file/README.md
index bb21b84c77..424ed6212d 100644
--- a/llama-index-integrations/tools/llama-index-tools-python-file/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-python-file/README.md
@@ -10,7 +10,7 @@ Here's an example usage of the PythonFileToolSpec.
 
 ```python
 from llama_index.tools.python_file import PythonFileToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 pyfile = PythonFileToolSpec("./numpy_linalg.py")
 
diff --git a/llama-index-integrations/tools/llama-index-tools-requests/README.md b/llama-index-integrations/tools/llama-index-tools-requests/README.md
index ae2b99dc1a..541f772c58 100644
--- a/llama-index-integrations/tools/llama-index-tools-requests/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-requests/README.md
@@ -12,7 +12,7 @@ Here's an example usage of the RequestsToolSpec.
 
 ```python
 from llama_index.tools.requests import RequestsToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 domain_headers = {
     "api.openai.com": {
diff --git a/llama-index-integrations/tools/llama-index-tools-shopify/README.md b/llama-index-integrations/tools/llama-index-tools-shopify/README.md
index 32f530f504..c316c2f41f 100644
--- a/llama-index-integrations/tools/llama-index-tools-shopify/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-shopify/README.md
@@ -8,12 +8,16 @@ This tool has more extensive example usage documented in a Jupyter notebook [her
 
 In particular, the tool is very effective when combined with a method of retrieving data from the GraphQL schema definition.
 
+```bash
+pip install llama-index llama-index-readers-file llama-index-tools-shopify unstructured
+```
+
 ```python
 from llama_index.tools.shopify import ShopifyToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
-from llama_index.file.unstructured import UnstructuredReader
-from llama_index.tools.ondemand_loader_tool import OnDemandLoaderTool
+from llama_index.readers.file import UnstructuredReader
+from llama_index.core.tools.ondemand_loader_tool import OnDemandLoaderTool
 
 documentation_tool = OnDemandLoaderTool.from_defaults(
     UnstructuredReader(),
diff --git a/llama-index-integrations/tools/llama-index-tools-slack/README.md b/llama-index-integrations/tools/llama-index-tools-slack/README.md
index 50eef495a8..77a5fc2fdd 100644
--- a/llama-index-integrations/tools/llama-index-tools-slack/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-slack/README.md
@@ -6,7 +6,7 @@ This tool fetches the text from a list of Slack channels. You will need to initi
 
 ```python
 from llama_index.tools.slack import SlackToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = SlackToolSpec(slack_token="token")
 
diff --git a/llama-index-integrations/tools/llama-index-tools-tavily-research/README.md b/llama-index-integrations/tools/llama-index-tools-tavily-research/README.md
index 36ed8c5aee..60a1bc317b 100644
--- a/llama-index-integrations/tools/llama-index-tools-tavily-research/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-tavily-research/README.md
@@ -20,7 +20,7 @@ Here's an example usage of the TavilyToolSpec.
 
 ```python
 from llama_index.tools.tavily_research import TavilyToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tavily_tool = TavilyToolSpec(
     api_key="your-key",
diff --git a/llama-index-integrations/tools/llama-index-tools-text-to-image/README.md b/llama-index-integrations/tools/llama-index-tools-text-to-image/README.md
index 32c697453d..5c6a010901 100644
--- a/llama-index-integrations/tools/llama-index-tools-text-to-image/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-text-to-image/README.md
@@ -10,7 +10,7 @@ Another example showcases retrieval augmentation over a knowledge corpus with te
 
 ```python
 from llama_index.tools.text_to_image import TextToImageToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 openai.api_key = "sk-your-key"
 tool_spec = TextToImageToolSpec()
diff --git a/llama-index-integrations/tools/llama-index-tools-vector-db/README.md b/llama-index-integrations/tools/llama-index-tools-vector-db/README.md
index d7a7140a7f..69f377837f 100644
--- a/llama-index-integrations/tools/llama-index-tools-vector-db/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-vector-db/README.md
@@ -6,9 +6,9 @@ This tool wraps a VectorStoreIndex and enables an agent to call it with queries a
 
 ```python
 from llama_index.tools.vector_db import VectorDB
-from llama_index.agent import OpenAIAgent
-from llama_index.vector_stores.types import VectorStoreInfo
-from llama_index import VectorStoreIndex
+from llama_index.agent.openai import OpenAIAgent
+from llama_index.core.vector_stores import VectorStoreInfo
+from llama_index.core import VectorStoreIndex
 
 index = VectorStoreIndex(nodes=nodes)
 tool_spec = VectorDB(index=index)
diff --git a/llama-index-integrations/tools/llama-index-tools-waii/README.md b/llama-index-integrations/tools/llama-index-tools-waii/README.md
index f1b5c6ca29..1da3f82813 100644
--- a/llama-index-integrations/tools/llama-index-tools-waii/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-waii/README.md
@@ -52,8 +52,8 @@ print(index.query("Which table contains most columns?"))
 #### Initialize the agent:
 
 ```python
-from llama_index.agent import OpenAIAgent
-from llama_index.llms import OpenAI
+from llama_index.agent.openai import OpenAIAgent
+from llama_index.llms.openai import OpenAI
 
 agent = OpenAIAgent.from_tools(
     waii_tool.to_tool_list(), llm=OpenAI(model="gpt-4-1106-preview")
diff --git a/llama-index-integrations/tools/llama-index-tools-weather/README.md b/llama-index-integrations/tools/llama-index-tools-weather/README.md
index 251f674566..434a965cca 100644
--- a/llama-index-integrations/tools/llama-index-tools-weather/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-weather/README.md
@@ -13,7 +13,7 @@ Here's an example usage of the OpenWeatherMapToolSpec.
 
 ```python
 from llama_index.tools.weather import OpenWeatherMapToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = OpenWeatherMapToolSpec(key="...")
 
diff --git a/llama-index-integrations/tools/llama-index-tools-wikipedia/README.md b/llama-index-integrations/tools/llama-index-tools-wikipedia/README.md
index 0b6915d30d..9c8e83aa66 100644
--- a/llama-index-integrations/tools/llama-index-tools-wikipedia/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-wikipedia/README.md
@@ -8,7 +8,7 @@ This tool has more extensive example usage documented in a Jupyter notebook [her
 
 ```python
 from llama_index.tools.wikipedia import WikipediaToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = WikipediaToolSpec()
 
diff --git a/llama-index-integrations/tools/llama-index-tools-wolfram-alpha/README.md b/llama-index-integrations/tools/llama-index-tools-wolfram-alpha/README.md
index d7dc202b31..c6619a7015 100644
--- a/llama-index-integrations/tools/llama-index-tools-wolfram-alpha/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-wolfram-alpha/README.md
@@ -12,7 +12,7 @@ Here's an example usage of the WolframAlphaToolSpec.
 
 ```python
 from llama_index.tools.wolfram_alpha import WolframAlphaToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 
 wolfram_spec = WolframAlphaToolSpec(app_id="API-key")
diff --git a/llama-index-integrations/tools/llama-index-tools-yahoo-finance/README.md b/llama-index-integrations/tools/llama-index-tools-yahoo-finance/README.md
index 21779bb953..b2c3a4ef84 100644
--- a/llama-index-integrations/tools/llama-index-tools-yahoo-finance/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-yahoo-finance/README.md
@@ -8,7 +8,7 @@ Here's an example of how to use this tool:
 
 ```python
 from llama_index.tools.yahoo_finance import YahooFinanceToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 tool_spec = YahooFinanceToolSpec()
 agent = OpenAIAgent.from_tools(tool_spec.to_tool_list())
diff --git a/llama-index-integrations/tools/llama-index-tools-zapier/README.md b/llama-index-integrations/tools/llama-index-tools-zapier/README.md
index d26c068576..cb3060d829 100644
--- a/llama-index-integrations/tools/llama-index-tools-zapier/README.md
+++ b/llama-index-integrations/tools/llama-index-tools-zapier/README.md
@@ -10,7 +10,7 @@ Here's an example usage of the ZapierToolSpec.
 
 ```python
 from llama_index.tools.zapier import ZapierToolSpec
-from llama_index.agent import OpenAIAgent
+from llama_index.agent.openai import OpenAIAgent
 
 
 zapier_spec = ZapierToolSpec(api_key="sk-ak-your-key")
diff --git a/llama-index-packs/llama-index-packs-chroma-autoretrieval/README.md b/llama-index-packs/llama-index-packs-chroma-autoretrieval/README.md
index 7f70de593e..dc46644c7b 100644
--- a/llama-index-packs/llama-index-packs-chroma-autoretrieval/README.md
+++ b/llama-index-packs/llama-index-packs-chroma-autoretrieval/README.md
@@ -31,7 +31,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 
 vector_store_info = VectorStoreInfo(
     content_info="brief biography of celebrities",
diff --git a/llama-index-packs/llama-index-packs-cogniswitch-agent/README.md b/llama-index-packs/llama-index-packs-cogniswitch-agent/README.md
index 476ee35eb4..0723ca6d75 100644
--- a/llama-index-packs/llama-index-packs-cogniswitch-agent/README.md
+++ b/llama-index-packs/llama-index-packs-cogniswitch-agent/README.md
@@ -42,7 +42,7 @@ llamaindex-cli download-llamapack CogniswitchAgentPack --download-dir ./cs_pack
 import warnings
 
 warnings.filterwarnings("ignore")
-from llama_index.core.llama_packs import CogniswitchAgentPack
+from llama_index.packs.cogniswitch_agent import CogniswitchAgentPack
 import os
 
 
diff --git a/llama-index-packs/llama-index-packs-deeplake-deepmemory-retriever/README.md b/llama-index-packs/llama-index-packs-deeplake-deepmemory-retriever/README.md
index 7546a55058..2be08da526 100644
--- a/llama-index-packs/llama-index-packs-deeplake-deepmemory-retriever/README.md
+++ b/llama-index-packs/llama-index-packs-deeplake-deepmemory-retriever/README.md
@@ -17,7 +17,7 @@ You can then inspect the files at `./deepmemory_pack` and use them as a template
 You can download the pack to a `./deepmemory_pack` directory:
 
 ```python
-from llama_hub.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 DeepMemoryRetriever = download_llama_pack(
@@ -31,7 +31,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 
 nodes = [...]
 
diff --git a/llama-index-packs/llama-index-packs-deeplake-multimodal-retrieval/README.md b/llama-index-packs/llama-index-packs-deeplake-multimodal-retrieval/README.md
index 50427fe9d3..72fc3f3910 100644
--- a/llama-index-packs/llama-index-packs-deeplake-multimodal-retrieval/README.md
+++ b/llama-index-packs/llama-index-packs-deeplake-multimodal-retrieval/README.md
@@ -17,7 +17,7 @@ You can then inspect the files at `./deeplake_multimodal_pack` and use them as a
 You can download the pack to a `./deeplake_multimodal_pack` directory:
 
 ```python
-from llama_hub.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 DeepLakeMultimodalRetriever = download_llama_pack(
@@ -31,7 +31,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 
 # collection of image and text nodes
 nodes = [...]
diff --git a/llama-index-packs/llama-index-packs-dense-x-retrieval/README.md b/llama-index-packs/llama-index-packs-dense-x-retrieval/README.md
index 4219bb4489..0b8bf6c6ae 100644
--- a/llama-index-packs/llama-index-packs-dense-x-retrieval/README.md
+++ b/llama-index-packs/llama-index-packs-dense-x-retrieval/README.md
@@ -29,7 +29,7 @@ You can then inspect the files at `./dense_pack` and use them as a template for
 You can download the pack to a the `./dense_pack` directory:
 
 ```python
-from llama_index import SimpleDirectoryReader
+from llama_index.core import SimpleDirectoryReader
 from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
diff --git a/llama-index-packs/llama-index-packs-evaluator-benchmarker/README.md b/llama-index-packs/llama-index-packs-evaluator-benchmarker/README.md
index 3fff5c2aca..ba1ea63b00 100644
--- a/llama-index-packs/llama-index-packs-evaluator-benchmarker/README.md
+++ b/llama-index-packs/llama-index-packs-evaluator-benchmarker/README.md
@@ -32,8 +32,8 @@ single-grading evaluation — in this case, the usage flow remains the same.
 from llama_index.core.llama_dataset import download_llama_dataset
 from llama_index.core.llama_pack import download_llama_pack
 from llama_index.core.evaluation import PairwiseComparisonEvaluator
-from llama_index.core.llms import OpenAI
-from llama_index import ServiceContext
+from llama_index.llms.openai import OpenAI
+from llama_index.core import ServiceContext
 
 # download a LabelledRagDataset from llama-hub
 pairwise_dataset = download_llama_dataset(
diff --git a/llama-index-packs/llama-index-packs-fuzzy-citation/README.md b/llama-index-packs/llama-index-packs-fuzzy-citation/README.md
index edb6f077ac..4b90e932aa 100644
--- a/llama-index-packs/llama-index-packs-fuzzy-citation/README.md
+++ b/llama-index-packs/llama-index-packs-fuzzy-citation/README.md
@@ -21,7 +21,7 @@ You can then inspect the files at `./fuzzy_citation_pack` and use them as a temp
 You can download the pack to a the `./fuzzy_citation_pack` directory:
 
 ```python
-from llama_index import Document, VectorStoreIndex
+from llama_index.core import Document, VectorStoreIndex
 from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
diff --git a/llama-index-packs/llama-index-packs-gmail-openai-agent/README.md b/llama-index-packs/llama-index-packs-gmail-openai-agent/README.md
index a8cd8cec53..1e13d1329d 100644
--- a/llama-index-packs/llama-index-packs-gmail-openai-agent/README.md
+++ b/llama-index-packs/llama-index-packs-gmail-openai-agent/README.md
@@ -43,7 +43,7 @@ agent = gmail_agent_pack.agent
 response = agent.chat("What is my most recent email?")
 
 # Use the tool spec in another agent
-from llama_index.core.agents import ReActAgent
+from llama_index.core.agent import ReActAgent
 
 tool_spec = gmail_agent_pack.tool_spec
 agent = ReActAgent.from_tools(tool_spec.to_tool_list())
diff --git a/llama-index-packs/llama-index-packs-koda-retriever/README.md b/llama-index-packs/llama-index-packs-koda-retriever/README.md
index 582d67388a..c703e1467d 100644
--- a/llama-index-packs/llama-index-packs-koda-retriever/README.md
+++ b/llama-index-packs/llama-index-packs-koda-retriever/README.md
@@ -29,7 +29,7 @@ Please see the [examples](./examples/) folder for more specific examples.
 from llama_index.packs.koda_retriever import KodaRetriever
 from llama_index.core import VectorStoreIndex
 from llama_index.llms.openai import OpenAI
-from llama_index.embeddings.openai.base import OpenAIEmbedding
+from llama_index.embeddings.openai import OpenAIEmbedding
 from llama_index.core.postprocessor import LLMRerank
 from llama_index.core import Settings
 
diff --git a/llama-index-packs/llama-index-packs-multidoc-autoretrieval/README.md b/llama-index-packs/llama-index-packs-multidoc-autoretrieval/README.md
index d8250aa765..f5ddbcdd8d 100644
--- a/llama-index-packs/llama-index-packs-multidoc-autoretrieval/README.md
+++ b/llama-index-packs/llama-index-packs-multidoc-autoretrieval/README.md
@@ -31,7 +31,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 
 import weaviate
 
diff --git a/llama-index-packs/llama-index-packs-nebulagraph-query-engine/README.md b/llama-index-packs/llama-index-packs-nebulagraph-query-engine/README.md
index a776ac5c2c..5b3538fda5 100644
--- a/llama-index-packs/llama-index-packs-nebulagraph-query-engine/README.md
+++ b/llama-index-packs/llama-index-packs-nebulagraph-query-engine/README.md
@@ -37,11 +37,15 @@ From here, you can use the pack, or inspect and modify the pack in `./nebulagrap
 
 Then, you can set up the pack like so:
 
+```bash
+pip install llama-index-readers-wikipedia
+```
+
 ```python
 # Load the docs (example of Paleo diet from Wikipedia)
-from llama_index import download_loader
 
-WikipediaReader = download_loader("WikipediaReader")
+from llama_index.readers.wikipedia import WikipediaReader
+
 loader = WikipediaReader()
 docs = loader.load_data(pages=["Paleolithic diet"], auto_suggest=False)
 print(f"Loaded {len(docs)} documents")
@@ -75,7 +79,7 @@ nebulagraph_pack = NebulaGraphQueryEnginePack(
 Optionally, you can pass in the `query_engine_type` from `NebulaGraphQueryEngineType` to construct `NebulaGraphQueryEnginePack`. If `query_engine_type` is not defined, it defaults to Knowledge Graph vector based entity retrieval.
 
 ```python
-from llama_index.packs.nebulagraph_query_engine.base import (
+from llama_index.packs.nebulagraph_query_engine.base import (
     NebulaGraphQueryEngineType,
 )
 
diff --git a/llama-index-packs/llama-index-packs-neo4j-query-engine/README.md b/llama-index-packs/llama-index-packs-neo4j-query-engine/README.md
index c508cc743a..2582fe676a 100644
--- a/llama-index-packs/llama-index-packs-neo4j-query-engine/README.md
+++ b/llama-index-packs/llama-index-packs-neo4j-query-engine/README.md
@@ -37,11 +37,15 @@ From here, you can use the pack, or inspect and modify the pack in `./neo4j_pack
 
 Then, you can set up the pack like so:
 
+```bash
+pip install llama-index-readers-wikipedia
+```
+
 ```python
 # Load the docs (example of Paleo diet from Wikipedia)
-from llama_index import download_loader
 
-WikipediaReader = download_loader("WikipediaReader")
+from llama_index.readers.wikipedia import WikipediaReader
+
 loader = WikipediaReader()
 docs = loader.load_data(pages=["Paleolithic diet"], auto_suggest=False)
 print(f"Loaded {len(docs)} documents")
@@ -63,7 +67,7 @@ neo4j_pack = Neo4jQueryEnginePack(
 Optionally, you can pass in the `query_engine_type` from `Neo4jQueryEngineType` to construct `Neo4jQueryEnginePack`. If `query_engine_type` is not defined, it defaults to Knowledge Graph vector based entity retrieval.
 
 ```python
-from llama_index.packs.neo4j_query_engine.base import Neo4jQueryEngineType
+from llama_index.packs.neo4j_query_engine.base import Neo4jQueryEngineType
 
 # create the pack
 neo4j_pack = Neo4jQueryEnginePack(
diff --git a/llama-index-packs/llama-index-packs-rag-cli-local/README.md b/llama-index-packs/llama-index-packs-rag-cli-local/README.md
index 90352835e6..ee3ba4a0d2 100644
--- a/llama-index-packs/llama-index-packs-rag-cli-local/README.md
+++ b/llama-index-packs/llama-index-packs-rag-cli-local/README.md
@@ -21,7 +21,7 @@ which makes it hard to load directly.
 We will show you how to import the agent from these files!
 
 ```python
-from llama_index.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 download_llama_pack("LocalRAGCLIPack", "./local_rag_cli_pack", skip_load=True)
diff --git a/llama-index-packs/llama-index-packs-rag-evaluator/README.md b/llama-index-packs/llama-index-packs-rag-evaluator/README.md
index abe136c896..d5a6d405f7 100644
--- a/llama-index-packs/llama-index-packs-rag-evaluator/README.md
+++ b/llama-index-packs/llama-index-packs-rag-evaluator/README.md
@@ -25,7 +25,7 @@ built off of its source documents.
 ```python
 from llama_index.core.llama_dataset import download_llama_dataset
 from llama_index.core.llama_pack import download_llama_pack
-from llama_index import VectorStoreIndex
+from llama_index.core import VectorStoreIndex
 
 # download a LabelledRagDataset from llama-hub
 rag_dataset, documents = download_llama_dataset(
diff --git a/llama-index-packs/llama-index-packs-redis-ingestion-pipeline/README.md b/llama-index-packs/llama-index-packs-redis-ingestion-pipeline/README.md
index 6c4dc6de2f..cbe2a65e51 100644
--- a/llama-index-packs/llama-index-packs-redis-ingestion-pipeline/README.md
+++ b/llama-index-packs/llama-index-packs-redis-ingestion-pipeline/README.md
@@ -30,8 +30,8 @@ From here, you can use the pack, or inspect and modify the pack in `./redis_inge
 Then, you can set up the pack like so:
 
 ```python
-from llama_index.core.text_splitter import SentenceSplitter
-from llama_index.core.embeddings import OpenAIEmbedding
+from llama_index.core.node_parser import SentenceSplitter
+from llama_index.embeddings.openai import OpenAIEmbedding
 
 transformations = [SentenceSplitter(), OpenAIEmbedding()]
 
diff --git a/llama-index-packs/llama-index-packs-retry-engine-weaviate/README.md b/llama-index-packs/llama-index-packs-retry-engine-weaviate/README.md
index 345ca13440..5f88ce6597 100644
--- a/llama-index-packs/llama-index-packs-retry-engine-weaviate/README.md
+++ b/llama-index-packs/llama-index-packs-retry-engine-weaviate/README.md
@@ -31,7 +31,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 
 vector_store_info = VectorStoreInfo(
     content_info="brief biography of celebrities",
diff --git a/llama-index-packs/llama-index-packs-self-discover/README.md b/llama-index-packs/llama-index-packs-self-discover/README.md
index b7b923ca17..51ce36fc26 100644
--- a/llama-index-packs/llama-index-packs-self-discover/README.md
+++ b/llama-index-packs/llama-index-packs-self-discover/README.md
@@ -36,7 +36,7 @@ There are two ways using LlamaPack:
 ### Using `download_llama_pack`
 
 ```python
-from llama_index.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 SelfDiscoverPack = download_llama_pack(
diff --git a/llama-index-packs/llama-index-packs-self-rag/README.md b/llama-index-packs/llama-index-packs-self-rag/README.md
index 56bf4cec94..5caf1f240c 100644
--- a/llama-index-packs/llama-index-packs-self-rag/README.md
+++ b/llama-index-packs/llama-index-packs-self-rag/README.md
@@ -28,7 +28,7 @@ huggingface-cli download m4r1/selfrag_llama2_7b-GGUF selfrag_llama2_7b.q4_k_m.gg
 ```
 
 ```python
-from llama_index.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 SelfRAGPack = download_llama_pack("SelfRAGPack", "./self_rag_pack")
diff --git a/llama-index-packs/llama-index-packs-sub-question-weaviate/README.md b/llama-index-packs/llama-index-packs-sub-question-weaviate/README.md
index 0ef0cdfb83..5d258b12f1 100644
--- a/llama-index-packs/llama-index-packs-sub-question-weaviate/README.md
+++ b/llama-index-packs/llama-index-packs-sub-question-weaviate/README.md
@@ -31,7 +31,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 
 vector_store_info = VectorStoreInfo(
     content_info="brief biography of celebrities",
diff --git a/llama-index-packs/llama-index-packs-timescale-vector-autoretrieval/README.md b/llama-index-packs/llama-index-packs-timescale-vector-autoretrieval/README.md
index a9204d506b..56a07d28ee 100644
--- a/llama-index-packs/llama-index-packs-timescale-vector-autoretrieval/README.md
+++ b/llama-index-packs/llama-index-packs-timescale-vector-autoretrieval/README.md
@@ -51,7 +51,7 @@ You can then inspect the files at `./tsv_pack` and use them as a template for yo
 You can download the pack to a the `./tsv_pack` directory:
 
 ```python
-from llama_hub.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 TimescaleVectorAutoretrievalPack = download_llama_pack(
@@ -65,7 +65,7 @@ Then, you can set up the pack like so:
 
 ```python
 # setup pack arguments
-from llama_index.core.vector_stores.types import MetadataInfo, VectorStoreInfo
+from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo
 from timescale_vector import client
 from dotenv import load_dotenv, find_dotenv
 import os
diff --git a/llama-index-packs/llama-index-packs-vanna/README.md b/llama-index-packs/llama-index-packs-vanna/README.md
index d4b42ec9cc..42fde06f55 100644
--- a/llama-index-packs/llama-index-packs-vanna/README.md
+++ b/llama-index-packs/llama-index-packs-vanna/README.md
@@ -24,7 +24,7 @@ You can then inspect the files at `./vanna_pack` and use them as a template for
 You can download the pack to a `./vanna_pack` directory:
 
 ```python
-from llama_index.llama_pack import download_llama_pack
+from llama_index.core.llama_pack import download_llama_pack
 
 # download and install dependencies
 VannaPack = download_llama_pack("VannaPack", "./vanna_pack")
-- 
GitLab