diff --git a/.changeset/calm-eggs-type.md b/.changeset/calm-eggs-type.md
new file mode 100644
index 0000000000000000000000000000000000000000..4ac61d8da39347e49a0b10c616024f4e03da2dcb
--- /dev/null
+++ b/.changeset/calm-eggs-type.md
@@ -0,0 +1,7 @@
+---
+"@llamaindex/readers": patch
+"@llamaindex/core": patch
+"@llamaindex/doc": patch
+---
+
+Expose additional exports (response synthesizer classes and `responseModeSchema`) to fix unavailable documentation links, and update the documentation to match the latest code.
diff --git a/apps/next/scripts/validate-links.mts b/apps/next/scripts/validate-links.mts
index cafaee980fad5621bece6f9a6cdecc3cc452010e..86a1393fe93aaa2e432c661091d618022c5c087c 100644
--- a/apps/next/scripts/validate-links.mts
+++ b/apps/next/scripts/validate-links.mts
@@ -162,7 +162,12 @@ async function validateLinks(): Promise<LinkValidationResult[]> {
     const invalidLinks = links.filter(({ link }) => {
       // Check if the link exists in valid routes
       // First normalize the link (remove any query string or hash)
-      const normalizedLink = link.split("#")[0].split("?")[0];
+      const baseLink = link.split("?")[0].split("#")[0];
+      // Remove the trailing slash if present.
+      // This works with links like "api/interfaces/MetadataFilter#operator" and "api/interfaces/MetadataFilter/#operator".
+      const normalizedLink = baseLink.endsWith("/")
+        ? baseLink.slice(0, -1)
+        : baseLink;
 
       // Remove llamaindex/ prefix if it exists as it's the root of the docs
       let routePath = normalizedLink;
@@ -192,8 +197,7 @@ async function main() {
 
   try {
     // Check for invalid internal links
-    const validationResults: LinkValidationResult[] = [];
-    await validateLinks();
+    const validationResults: LinkValidationResult[] = await validateLinks();
     // Check for relative links
     const relativeLinksResults = await findRelativeLinks();
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/data_loaders/index.mdx b/apps/next/src/content/docs/llamaindex/modules/data_loaders/index.mdx
index 295540506e9055666f79001e47a870625d613f43..188ddba65b3b4fa35410c5a9e822f23bbcff058a 100644
--- a/apps/next/src/content/docs/llamaindex/modules/data_loaders/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/data_loaders/index.mdx
@@ -35,7 +35,7 @@ Currently, the following readers are mapped to specific file types:
 
 - [TextFileReader](/docs/api/classes/TextFileReader): `.txt`
 - [PDFReader](/docs/api/classes/PDFReader): `.pdf`
-- [PapaCSVReader](/docs/api/classes/PapaCSVReader): `.csv`
+- [CSVReader](/docs/api/classes/CSVReader): `.csv`
 - [MarkdownReader](/docs/api/classes/MarkdownReader): `.md`
 - [DocxReader](/docs/api/classes/DocxReader): `.docx`
 - [HTMLReader](/docs/api/classes/HTMLReader): `.htm`, `.html`
diff --git a/apps/next/src/content/docs/llamaindex/modules/data_stores/chat_stores/index.mdx b/apps/next/src/content/docs/llamaindex/modules/data_stores/chat_stores/index.mdx
index cadcc2ad4170a5b8efca631e072af6dcacb0951e..4fe5e8f3ebad4476d62a14ae4f916346313d92bc 100644
--- a/apps/next/src/content/docs/llamaindex/modules/data_stores/chat_stores/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/data_stores/chat_stores/index.mdx
@@ -12,5 +12,5 @@ Check the [LlamaIndexTS Github](https://github.com/run-llama/LlamaIndexTS) for t
 
 ## API Reference
 
-- [BaseChatStore](/docs/api/interfaces/BaseChatStore)
+- [BaseChatStore](/docs/api/classes/BaseChatStore)
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/evaluation/correctness.mdx b/apps/next/src/content/docs/llamaindex/modules/evaluation/correctness.mdx
index 50cb3c856ad3b37a4858c67fe57b4e2ded5ad6c5..c1189dfddb56f14cd7ff685b9851c3e50f278071 100644
--- a/apps/next/src/content/docs/llamaindex/modules/evaluation/correctness.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/evaluation/correctness.mdx
@@ -74,4 +74,4 @@ the response is not correct with a score of 2.5
 
 ## API Reference
 
-- [CorrectnessEvaluator](/docs/api/classes/CorrectnessEvaluator)
+- [CorrectnessEvaluator](/docs/api/classes/CorrectnessEvaluator)
\ No newline at end of file
diff --git a/apps/next/src/content/docs/llamaindex/modules/prompt/index.mdx b/apps/next/src/content/docs/llamaindex/modules/prompt/index.mdx
index f26d0dd3cc7fcd390758efb83cdbf405ef92d2c6..d53ff387cf00e03262aa37d799a3fd01f7a94fec 100644
--- a/apps/next/src/content/docs/llamaindex/modules/prompt/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/prompt/index.mdx
@@ -28,14 +28,21 @@ Answer:`;
 
 ### 1. Customizing the default prompt on initialization
 
-The first method is to create a new instance of `ResponseSynthesizer` (or the module you would like to update the prompt) and pass the custom prompt to the `responseBuilder` parameter. Then, pass the instance to the `asQueryEngine` method of the index.
+The first method is to create a new instance of a Response Synthesizer (or the module you would like to update the prompt) by using the `getResponseSynthesizer` function. Instead of passing the custom prompt to the deprecated `responseBuilder` parameter, call `getResponseSynthesizer` with the mode as the first argument and supply the new prompt via the `options` parameter.
 
 ```ts
-// Create an instance of response synthesizer
+// Create an instance of Response Synthesizer
+
+// Deprecated usage:
 const responseSynthesizer = new ResponseSynthesizer({
   responseBuilder: new CompactAndRefine(undefined, newTextQaPrompt),
 });
 
+// Current usage:
+const responseSynthesizer = getResponseSynthesizer('compact', {
+  textQATemplate: newTextQaPrompt
+})
+
 // Create index
 const index = await VectorStoreIndex.fromDocuments([document]);
 
@@ -75,5 +82,5 @@ const response = await queryEngine.query({
 
 ## API Reference
 
-- [ResponseSynthesizer](/docs/api/classes/ResponseSynthesizer)
+- [Response Synthesizer](/docs/llamaindex/modules/response_synthesizer)
 - [CompactAndRefine](/docs/api/classes/CompactAndRefine)
diff --git a/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx b/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx
index bda0d53bfdb70fb5f2886256a66c0f5551f93b2b..8e94f5bd7f253e1c0cd661198108bc242d45e509 100644
--- a/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx
@@ -1,5 +1,5 @@
 ---
-title: ResponseSynthesizer
+title: Response Synthesizer
 ---
 
 The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
@@ -12,15 +12,17 @@ The ResponseSynthesizer is responsible for sending the query, nodes, and prompt
   multiple compact prompts. The same as `refine`, but should result in less LLM calls.
 - `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree
   and return the root node as the response. Good for summarization purposes.
-- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text
-  chunk while accumulating the responses into an array. Returns a concatenated string of all
-  responses. Good for when you need to run the same query separately against each text
-  chunk.
+- `MultiModal`: Combines textual inputs with additional modality-specific metadata to generate an integrated response. 
+  It leverages a text QA template to build a prompt that incorporates various input types and produces either streaming or complete responses.
+  This approach is ideal for use cases where enriching the answer with multi-modal context (such as images, audio, or other data) 
+  can enhance the output quality.
 
 ```typescript
-import { NodeWithScore, TextNode, ResponseSynthesizer } from "llamaindex";
+import { NodeWithScore, TextNode, getResponseSynthesizer, responseModeSchema } from "llamaindex";
 
-const responseSynthesizer = new ResponseSynthesizer();
+// you can also use responseModeSchema.Enum.refine, responseModeSchema.Enum.tree_summarize, responseModeSchema.Enum.multi_modal
+// or you can use the CompactAndRefine, Refine, TreeSummarize, or MultiModal classes directly
+const responseSynthesizer = getResponseSynthesizer(responseModeSchema.Enum.compact);
 
 const nodesWithScore: NodeWithScore[] = [
   {
@@ -55,8 +57,9 @@ for await (const chunk of stream) {
 
 ## API Reference
 
-- [ResponseSynthesizer](/docs/api/classes/ResponseSynthesizer)
+- [getResponseSynthesizer](/docs/api/functions/getResponseSynthesizer)
+- [responseModeSchema](/docs/api/variables/responseModeSchema)
 - [Refine](/docs/api/classes/Refine)
 - [CompactAndRefine](/docs/api/classes/CompactAndRefine)
 - [TreeSummarize](/docs/api/classes/TreeSummarize)
-- [SimpleResponseBuilder](/docs/api/classes/SimpleResponseBuilder)
+- [MultiModal](/docs/api/classes/MultiModal)
diff --git a/apps/next/typedoc.json b/apps/next/typedoc.json
index a545e44cd683978a62f47ecb66ba674877e1b30f..5b79b46f71ca4dff3265e8ebe3c7ef23f8965795 100644
--- a/apps/next/typedoc.json
+++ b/apps/next/typedoc.json
@@ -1,8 +1,13 @@
 {
   "plugin": ["typedoc-plugin-markdown", "typedoc-plugin-merge-modules"],
-  "entryPoints": ["../../packages/**/src/index.ts"],
+  "entryPoints": [
+    "../../packages/{,**/}index.ts",
+    "../../packages/readers/src/*.ts",
+    "../../packages/cloud/src/{reader,utils}.ts"
+  ],
   "exclude": [
     "../../packages/autotool/**/src/index.ts",
+    "../../packages/cloud/src/client/index.ts",
     "**/node_modules/**",
     "**/dist/**",
     "**/test/**",
diff --git a/packages/core/src/response-synthesizers/factory.ts b/packages/core/src/response-synthesizers/factory.ts
index 0e6ae17537ed3c794c6ac368e59a2159b0f8fabd..7f9af5f4ea40ead5430b09d8af02adc6b8a8c1ff 100644
--- a/packages/core/src/response-synthesizers/factory.ts
+++ b/packages/core/src/response-synthesizers/factory.ts
@@ -23,7 +23,7 @@ import {
 } from "./base-synthesizer";
 import { createMessageContent } from "./utils";
 
-const responseModeSchema = z.enum([
+export const responseModeSchema = z.enum([
   "refine",
   "compact",
   "tree_summarize",
@@ -35,7 +35,7 @@ export type ResponseMode = z.infer<typeof responseModeSchema>;
 /**
  * A response builder that uses the query to ask the LLM generate a better response using multiple text chunks.
  */
-class Refine extends BaseSynthesizer {
+export class Refine extends BaseSynthesizer {
   textQATemplate: TextQAPrompt;
   refineTemplate: RefinePrompt;
 
@@ -213,7 +213,7 @@ class Refine extends BaseSynthesizer {
 /**
  * CompactAndRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.
  */
-class CompactAndRefine extends Refine {
+export class CompactAndRefine extends Refine {
   async getResponse(
     query: MessageContent,
     nodes: NodeWithScore[],
@@ -267,7 +267,7 @@ class CompactAndRefine extends Refine {
 /**
  * TreeSummarize repacks the text chunks into the smallest possible number of chunks and then summarizes them, then recursively does so until there's one chunk left.
  */
-class TreeSummarize extends BaseSynthesizer {
+export class TreeSummarize extends BaseSynthesizer {
   summaryTemplate: TreeSummarizePrompt;
 
   constructor(
@@ -370,7 +370,7 @@ class TreeSummarize extends BaseSynthesizer {
   }
 }
 
-class MultiModal extends BaseSynthesizer {
+export class MultiModal extends BaseSynthesizer {
   metadataMode: MetadataMode;
   textQATemplate: TextQAPrompt;
 
diff --git a/packages/core/src/response-synthesizers/index.ts b/packages/core/src/response-synthesizers/index.ts
index a782d514f18ba7b7fd0f6410a499345153c3bbd5..907958b237786f7f19c295aaf8898599b55a691c 100644
--- a/packages/core/src/response-synthesizers/index.ts
+++ b/packages/core/src/response-synthesizers/index.ts
@@ -2,7 +2,15 @@ export {
   BaseSynthesizer,
   type BaseSynthesizerOptions,
 } from "./base-synthesizer";
-export { getResponseSynthesizer, type ResponseMode } from "./factory";
+export {
+  CompactAndRefine,
+  MultiModal,
+  Refine,
+  TreeSummarize,
+  getResponseSynthesizer,
+  responseModeSchema,
+  type ResponseMode,
+} from "./factory";
 export type {
   SynthesizeEndEvent,
   SynthesizeQuery,
diff --git a/packages/readers/package.json b/packages/readers/package.json
index 9a7d6338bca7d67b47ce6e85caaecc53adddb922..80953af9c912346478d0ab392d68e88a7f41461d 100644
--- a/packages/readers/package.json
+++ b/packages/readers/package.json
@@ -230,7 +230,6 @@
     "mammoth": "^1.7.2",
     "mongodb": "^6.7.0",
     "notion-md-crawler": "^1.0.0",
-    "papaparse": "^5.4.1",
     "unpdf": "^0.12.1"
   }
 }
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index c828d9063420ef4c80489cda1c64fd447174e2cb..a5ff7b477b6d45f3edb78fdb85be764b78697b8a 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -1695,9 +1695,6 @@ importers:
       notion-md-crawler:
         specifier: ^1.0.0
         version: 1.0.1
-      papaparse:
-        specifier: ^5.4.1
-        version: 5.5.2
       unpdf:
         specifier: ^0.12.1
         version: 0.12.1
@@ -9710,9 +9707,6 @@ packages:
   pako@1.0.11:
     resolution: {integrity: sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==}
 
-  papaparse@5.5.2:
-    resolution: {integrity: sha512-PZXg8UuAc4PcVwLosEEDYjPyfWnTEhOrUfdv+3Bx+NuAb+5NhDmXzg5fHWmdCh1mP5p7JAZfFr3IMQfcntNAdA==}
-
   parent-module@1.0.1:
     resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==}
     engines: {node: '>=6'}
@@ -22183,8 +22177,6 @@ snapshots:
 
   pako@1.0.11: {}
 
-  papaparse@5.5.2: {}
-
   parent-module@1.0.1:
     dependencies:
       callsites: 3.1.0