From 21bebfcaa6b3e5b2cd0ff22865794f4662e01108 Mon Sep 17 00:00:00 2001
From: Jack Qian <jackqiangc@gmail.com>
Date: Sun, 16 Mar 2025 14:32:19 +0800
Subject: [PATCH] docs: add missing links (#1754)

---
 .changeset/calm-eggs-type.md                  |  7 +++++++
 apps/next/scripts/validate-links.mts          | 10 ++++++---
 .../llamaindex/modules/data_loaders/index.mdx |  2 +-
 .../modules/data_stores/chat_stores/index.mdx |  2 +-
 .../modules/evaluation/correctness.mdx        |  2 +-
 .../docs/llamaindex/modules/prompt/index.mdx  | 13 +++++++++---
 .../modules/response_synthesizer.mdx          | 21 +++++++++++--------
 apps/next/typedoc.json                        |  7 ++++++-
 .../core/src/response-synthesizers/factory.ts | 10 ++++-----
 .../core/src/response-synthesizers/index.ts   | 10 ++++++++-
 packages/readers/package.json                 |  1 -
 pnpm-lock.yaml                                |  8 -------
 12 files changed, 59 insertions(+), 34 deletions(-)
 create mode 100644 .changeset/calm-eggs-type.md

diff --git a/.changeset/calm-eggs-type.md b/.changeset/calm-eggs-type.md
new file mode 100644
index 000000000..4ac61d8da
--- /dev/null
+++ b/.changeset/calm-eggs-type.md
@@ -0,0 +1,7 @@
+---
+"@llamaindex/readers": patch
+"@llamaindex/core": patch
+"@llamaindex/doc": patch
+---
+
+Expose additional exports to fix broken documentation links, and update the documentation to match the latest code.
diff --git a/apps/next/scripts/validate-links.mts b/apps/next/scripts/validate-links.mts
index cafaee980..86a1393fe 100644
--- a/apps/next/scripts/validate-links.mts
+++ b/apps/next/scripts/validate-links.mts
@@ -162,7 +162,12 @@ async function validateLinks(): Promise<LinkValidationResult[]> {
     const invalidLinks = links.filter(({ link }) => {
       // Check if the link exists in valid routes
       // First normalize the link (remove any query string or hash)
-      const normalizedLink = link.split("#")[0].split("?")[0];
+      const baseLink = link.split("?")[0].split("#")[0];
+      // Remove the trailing slash if present.
+      // This works with links like "api/interfaces/MetadataFilter#operator" and "api/interfaces/MetadataFilter/#operator".
+      const normalizedLink = baseLink.endsWith("/")
+        ? baseLink.slice(0, -1)
+        : baseLink;
 
       // Remove llamaindex/ prefix if it exists as it's the root of the docs
       let routePath = normalizedLink;
@@ -192,8 +197,7 @@ async function main() {
 
   try {
     // Check for invalid internal links
-    const validationResults: LinkValidationResult[] = [];
-    await validateLinks();
+    const validationResults: LinkValidationResult[] = await validateLinks();
     // Check for relative links
     const relativeLinksResults = await findRelativeLinks();
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/data_loaders/index.mdx b/apps/next/src/content/docs/llamaindex/modules/data_loaders/index.mdx
index 295540506..188ddba65 100644
--- a/apps/next/src/content/docs/llamaindex/modules/data_loaders/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/data_loaders/index.mdx
@@ -35,7 +35,7 @@ Currently, the following readers are mapped to specific file types:
 
 - [TextFileReader](/docs/api/classes/TextFileReader): `.txt`
 - [PDFReader](/docs/api/classes/PDFReader): `.pdf`
-- [PapaCSVReader](/docs/api/classes/PapaCSVReader): `.csv`
+- [CSVReader](/docs/api/classes/CSVReader): `.csv`
 - [MarkdownReader](/docs/api/classes/MarkdownReader): `.md`
 - [DocxReader](/docs/api/classes/DocxReader): `.docx`
 - [HTMLReader](/docs/api/classes/HTMLReader): `.htm`, `.html`
diff --git a/apps/next/src/content/docs/llamaindex/modules/data_stores/chat_stores/index.mdx b/apps/next/src/content/docs/llamaindex/modules/data_stores/chat_stores/index.mdx
index cadcc2ad4..4fe5e8f3e 100644
--- a/apps/next/src/content/docs/llamaindex/modules/data_stores/chat_stores/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/data_stores/chat_stores/index.mdx
@@ -12,5 +12,5 @@ Check the [LlamaIndexTS Github](https://github.com/run-llama/LlamaIndexTS) for t
 
 ## API Reference
 
-- [BaseChatStore](/docs/api/interfaces/BaseChatStore)
+- [BaseChatStore](/docs/api/classes/BaseChatStore)
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/evaluation/correctness.mdx b/apps/next/src/content/docs/llamaindex/modules/evaluation/correctness.mdx
index 50cb3c856..c1189dfdd 100644
--- a/apps/next/src/content/docs/llamaindex/modules/evaluation/correctness.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/evaluation/correctness.mdx
@@ -74,4 +74,4 @@ the response is not correct with a score of 2.5
 
 ## API Reference
 
-- [CorrectnessEvaluator](/docs/api/classes/CorrectnessEvaluator)
+- [CorrectnessEvaluator](/docs/api/classes/CorrectnessEvaluator)
\ No newline at end of file
diff --git a/apps/next/src/content/docs/llamaindex/modules/prompt/index.mdx b/apps/next/src/content/docs/llamaindex/modules/prompt/index.mdx
index f26d0dd3c..d53ff387c 100644
--- a/apps/next/src/content/docs/llamaindex/modules/prompt/index.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/prompt/index.mdx
@@ -28,14 +28,21 @@ Answer:`;
 
 ### 1. Customizing the default prompt on initialization
 
-The first method is to create a new instance of `ResponseSynthesizer` (or the module you would like to update the prompt) and pass the custom prompt to the `responseBuilder` parameter. Then, pass the instance to the `asQueryEngine` method of the index.
+The first method is to create a new instance of a Response Synthesizer (or the module whose prompt you would like to update) by using the `getResponseSynthesizer` function. Instead of passing the custom prompt to the deprecated `responseBuilder` parameter, call `getResponseSynthesizer` with the mode as the first argument and supply the new prompt via the `options` parameter.
 
 ```ts
-// Create an instance of response synthesizer
+// Create an instance of Response Synthesizer
+
+// Deprecated usage:
 const responseSynthesizer = new ResponseSynthesizer({
   responseBuilder: new CompactAndRefine(undefined, newTextQaPrompt),
 });
 
+// Current usage:
+const responseSynthesizer = getResponseSynthesizer('compact', {
+  textQATemplate: newTextQaPrompt
+})
+
 // Create index
 const index = await VectorStoreIndex.fromDocuments([document]);
 
@@ -75,5 +82,5 @@ const response = await queryEngine.query({
 
 ## API Reference
 
-- [ResponseSynthesizer](/docs/api/classes/ResponseSynthesizer)
+- [Response Synthesizer](/docs/llamaindex/modules/response_synthesizer)
 - [CompactAndRefine](/docs/api/classes/CompactAndRefine)
diff --git a/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx b/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx
index bda0d53bf..8e94f5bd7 100644
--- a/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx
@@ -1,5 +1,5 @@
 ---
-title: ResponseSynthesizer
+title: Response Synthesizer
 ---
 
 The ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. There are a few key modes for generating a response:
@@ -12,15 +12,17 @@ The ResponseSynthesizer is responsible for sending the query, nodes, and prompt
   multiple compact prompts. The same as `refine`, but should result in less LLM calls.
 - `TreeSummarize`: Given a set of text chunks and the query, recursively construct a tree
   and return the root node as the response. Good for summarization purposes.
-- `SimpleResponseBuilder`: Given a set of text chunks and the query, apply the query to each text
-  chunk while accumulating the responses into an array. Returns a concatenated string of all
-  responses. Good for when you need to run the same query separately against each text
-  chunk.
+- `MultiModal`: Combines textual inputs with additional modality-specific metadata to generate an integrated response. 
+  It leverages a text QA template to build a prompt that incorporates various input types and produces either streaming or complete responses.
+  This approach is ideal for use cases where enriching the answer with multi-modal context (such as images, audio, or other data) 
+  can enhance the output quality.
 
 ```typescript
-import { NodeWithScore, TextNode, ResponseSynthesizer } from "llamaindex";
+import { NodeWithScore, TextNode, getResponseSynthesizer, responseModeSchema } from "llamaindex";
 
-const responseSynthesizer = new ResponseSynthesizer();
+// you can also use responseModeSchema.Enum.refine, responseModeSchema.Enum.tree_summarize, responseModeSchema.Enum.multi_modal
+// or you can use the CompactAndRefine, Refine, TreeSummarize, or MultiModal classes directly
+const responseSynthesizer = getResponseSynthesizer(responseModeSchema.Enum.compact);
 
 const nodesWithScore: NodeWithScore[] = [
   {
@@ -55,8 +57,9 @@ for await (const chunk of stream) {
 
 ## API Reference
 
-- [ResponseSynthesizer](/docs/api/classes/ResponseSynthesizer)
+- [getResponseSynthesizer](/docs/api/functions/getResponseSynthesizer)
+- [responseModeSchema](/docs/api/variables/responseModeSchema)
 - [Refine](/docs/api/classes/Refine)
 - [CompactAndRefine](/docs/api/classes/CompactAndRefine)
 - [TreeSummarize](/docs/api/classes/TreeSummarize)
-- [SimpleResponseBuilder](/docs/api/classes/SimpleResponseBuilder)
+- [MultiModal](/docs/api/classes/MultiModal)
diff --git a/apps/next/typedoc.json b/apps/next/typedoc.json
index a545e44cd..5b79b46f7 100644
--- a/apps/next/typedoc.json
+++ b/apps/next/typedoc.json
@@ -1,8 +1,13 @@
 {
   "plugin": ["typedoc-plugin-markdown", "typedoc-plugin-merge-modules"],
-  "entryPoints": ["../../packages/**/src/index.ts"],
+  "entryPoints": [
+    "../../packages/{,**/}index.ts",
+    "../../packages/readers/src/*.ts",
+    "../../packages/cloud/src/{reader,utils}.ts"
+  ],
   "exclude": [
     "../../packages/autotool/**/src/index.ts",
+    "../../packages/cloud/src/client/index.ts",
     "**/node_modules/**",
     "**/dist/**",
     "**/test/**",
diff --git a/packages/core/src/response-synthesizers/factory.ts b/packages/core/src/response-synthesizers/factory.ts
index 0e6ae1753..7f9af5f4e 100644
--- a/packages/core/src/response-synthesizers/factory.ts
+++ b/packages/core/src/response-synthesizers/factory.ts
@@ -23,7 +23,7 @@ import {
 } from "./base-synthesizer";
 import { createMessageContent } from "./utils";
 
-const responseModeSchema = z.enum([
+export const responseModeSchema = z.enum([
   "refine",
   "compact",
   "tree_summarize",
@@ -35,7 +35,7 @@ export type ResponseMode = z.infer<typeof responseModeSchema>;
 /**
  * A response builder that uses the query to ask the LLM generate a better response using multiple text chunks.
  */
-class Refine extends BaseSynthesizer {
+export class Refine extends BaseSynthesizer {
   textQATemplate: TextQAPrompt;
   refineTemplate: RefinePrompt;
 
@@ -213,7 +213,7 @@ class Refine extends BaseSynthesizer {
 /**
  * CompactAndRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.
  */
-class CompactAndRefine extends Refine {
+export class CompactAndRefine extends Refine {
   async getResponse(
     query: MessageContent,
     nodes: NodeWithScore[],
@@ -267,7 +267,7 @@ class CompactAndRefine extends Refine {
 /**
  * TreeSummarize repacks the text chunks into the smallest possible number of chunks and then summarizes them, then recursively does so until there's one chunk left.
  */
-class TreeSummarize extends BaseSynthesizer {
+export class TreeSummarize extends BaseSynthesizer {
   summaryTemplate: TreeSummarizePrompt;
 
   constructor(
@@ -370,7 +370,7 @@ class TreeSummarize extends BaseSynthesizer {
   }
 }
 
-class MultiModal extends BaseSynthesizer {
+export class MultiModal extends BaseSynthesizer {
   metadataMode: MetadataMode;
   textQATemplate: TextQAPrompt;
 
diff --git a/packages/core/src/response-synthesizers/index.ts b/packages/core/src/response-synthesizers/index.ts
index a782d514f..907958b23 100644
--- a/packages/core/src/response-synthesizers/index.ts
+++ b/packages/core/src/response-synthesizers/index.ts
@@ -2,7 +2,15 @@ export {
   BaseSynthesizer,
   type BaseSynthesizerOptions,
 } from "./base-synthesizer";
-export { getResponseSynthesizer, type ResponseMode } from "./factory";
+export {
+  CompactAndRefine,
+  MultiModal,
+  Refine,
+  TreeSummarize,
+  getResponseSynthesizer,
+  responseModeSchema,
+  type ResponseMode,
+} from "./factory";
 export type {
   SynthesizeEndEvent,
   SynthesizeQuery,
diff --git a/packages/readers/package.json b/packages/readers/package.json
index 9a7d6338b..80953af9c 100644
--- a/packages/readers/package.json
+++ b/packages/readers/package.json
@@ -230,7 +230,6 @@
     "mammoth": "^1.7.2",
     "mongodb": "^6.7.0",
     "notion-md-crawler": "^1.0.0",
-    "papaparse": "^5.4.1",
     "unpdf": "^0.12.1"
   }
 }
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index c828d9063..a5ff7b477 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -1695,9 +1695,6 @@ importers:
       notion-md-crawler:
         specifier: ^1.0.0
         version: 1.0.1
-      papaparse:
-        specifier: ^5.4.1
-        version: 5.5.2
       unpdf:
         specifier: ^0.12.1
         version: 0.12.1
@@ -9710,9 +9707,6 @@ packages:
   pako@1.0.11:
     resolution: {integrity: sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==}
 
-  papaparse@5.5.2:
-    resolution: {integrity: sha512-PZXg8UuAc4PcVwLosEEDYjPyfWnTEhOrUfdv+3Bx+NuAb+5NhDmXzg5fHWmdCh1mP5p7JAZfFr3IMQfcntNAdA==}
-
   parent-module@1.0.1:
     resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==}
     engines: {node: '>=6'}
@@ -22183,8 +22177,6 @@ snapshots:
 
   pako@1.0.11: {}
 
-  papaparse@5.5.2: {}
-
   parent-module@1.0.1:
     dependencies:
       callsites: 3.1.0
-- 
GitLab