From 33c8c2fe4748ba3a6e237cac6414d13059f0f9a6 Mon Sep 17 00:00:00 2001
From: Yi Ding <yi.s.ding@gmail.com>
Date: Tue, 29 Aug 2023 14:06:12 -0700
Subject: [PATCH] documentation update for SummaryIndex

---
 apps/docs/docs/end_to_end.md                    | 12 ++++++------
 apps/docs/docs/modules/high_level/data_index.md | 15 +++++----------
 apps/docs/docs/modules/low_level/retriever.md   |  8 ++++----
 packages/core/src/Prompt.ts                     |  4 +++-
 4 files changed, 18 insertions(+), 21 deletions(-)

diff --git a/apps/docs/docs/end_to_end.md b/apps/docs/docs/end_to_end.md
index ec77b43fa..8c5dc68c1 100644
--- a/apps/docs/docs/end_to_end.md
+++ b/apps/docs/docs/end_to_end.md
@@ -10,14 +10,14 @@ We include several end-to-end examples using LlamaIndex.TS in the repository
 
 Read a file and chat about it with the LLM.
 
-## [List Index](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/listIndex.ts)
-
-Create a list index and query it. This example also use the `LLMRetriever`, which will use the LLM to select the best nodes to use when generating answer.
-
 ## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/vectorIndex.ts)
 
 Create a vector index and query it. The vector index will use embeddings to fetch the top k most relevant nodes. By default, the top k is 2.
 
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/summaryIndex.ts)
+
+Create a summary index and query it. This example also uses the `SummaryIndexLLMRetriever`, which will use the LLM to select the best nodes to use when generating an answer.
+
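+As a rough sketch of the flow that example covers (the linked file is canonical; `SummaryIndex.fromDocuments` and the string-based `query` call are assumed from this release's API):
+
+```typescript
+import { Document, SummaryIndex } from "llamaindex";
+
+// Wrap raw text in a Document, build a summary index over it,
+// then ask the query engine to synthesize an answer over every node.
+const document = new Document({ text: "LlamaIndex.TS is a data framework for LLM apps." });
+const index = await SummaryIndex.fromDocuments([document]);
+const queryEngine = index.asQueryEngine();
+const response = await queryEngine.query("What is LlamaIndex.TS?");
+console.log(response.toString());
+```
+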
 ## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/storageContext.ts)
 
 Create and load a vector index. Persistence to disk in LlamaIndex.TS happens automatically once a storage context object is created.
@@ -28,7 +28,7 @@ Create a vector index and query it, while also configuring the `LLM`, the `S
 
 ## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/openai.ts)
 
-Create an OpenAI LLM and directly use it for chat. 
+Create an OpenAI LLM and directly use it for chat.
 
 ## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/llamadeuce.ts)
 
@@ -40,4 +40,4 @@ Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple qu
 
 ## [Low Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/lowlevel.ts)
 
-This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and sub-classed to meet your own needs.
\ No newline at end of file
+This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and sub-classed to meet your own needs.
diff --git a/apps/docs/docs/modules/high_level/data_index.md b/apps/docs/docs/modules/high_level/data_index.md
index bcb2ef496..a469817db 100644
--- a/apps/docs/docs/modules/high_level/data_index.md
+++ b/apps/docs/docs/modules/high_level/data_index.md
@@ -6,23 +6,18 @@ sidebar_position: 2
 
 An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
 
-- `ListIndex` - will send every `Node` in the index to the LLM in order to generate a response
 - `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response
 
 ```typescript
-import {
-  Document,
-  VectorStoreIndex,
-} from "llamaindex";
+import { Document, VectorStoreIndex } from "llamaindex";
 
 const document = new Document({ text: "test" });
 
-const index = await VectorStoreIndex.fromDocuments(
-  [document]
-);
+const index = await VectorStoreIndex.fromDocuments([document]);
 ```
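+
+`SummaryIndex` follows the same pattern; a minimal sketch, assuming it exposes the same `fromDocuments` factory as `VectorStoreIndex`:
+
+```typescript
+import { Document, SummaryIndex } from "llamaindex";
+
+const document = new Document({ text: "test" });
+
+// Every node in this index will be sent to the LLM at query time.
+const index = await SummaryIndex.fromDocuments([document]);
+```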
 
 ## API Reference
 
-- [ListIndex](../../api/classes/ListIndex.md)
-- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
\ No newline at end of file
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/docs/modules/low_level/retriever.md b/apps/docs/docs/modules/low_level/retriever.md
index 39d385182..9ead0602d 100644
--- a/apps/docs/docs/modules/low_level/retriever.md
+++ b/apps/docs/docs/modules/low_level/retriever.md
@@ -4,10 +4,10 @@ sidebar_position: 5
 
 # Retriever
 
-A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. For example, a `ListIndexRetriever` will fetch all nodes no matter the query. Meanwhile, a `VectorIndexRetriever` will only fetch the top-k most similar nodes.
+A retriever in LlamaIndex is used to fetch `Node`s from an index using a query string. A `VectorIndexRetriever` will fetch only the top-k most similar nodes, while a `SummaryIndexRetriever` will fetch all nodes no matter the query.
 
 ```typescript
-const retriever = vector_index.asRetriever()
+const retriever = vector_index.asRetriever();
 retriever.similarityTopK = 3;
 
 // Fetch nodes!
@@ -16,6 +16,6 @@ const nodesWithScore = await retriever.retrieve("query string");
 
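+For a `SummaryIndex`, the retriever interface is the same, but every node comes back regardless of the query. A minimal sketch, assuming `summaryIndex` was built with `SummaryIndex.fromDocuments`:
+
+```typescript
+// No similarityTopK here: a summary index retriever returns all nodes.
+const summaryRetriever = summaryIndex.asRetriever();
+const allNodes = await summaryRetriever.retrieve("any query string");
+```
+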
 ## API Reference
 
-- [ListIndexRetriever](../../api/classes/ListIndexRetriever.md)
-- [ListIndexLLMRetriever](../../api/classes/ListIndexLLMRetriever.md)
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
 - [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/packages/core/src/Prompt.ts b/packages/core/src/Prompt.ts
index 86e1a4fd4..262096198 100644
--- a/packages/core/src/Prompt.ts
+++ b/packages/core/src/Prompt.ts
@@ -7,7 +7,9 @@ import { ToolMetadata } from "./Tool";
  * NOTE this is a different interface compared to LlamaIndex Python
  * NOTE 2: we default to empty string to make it easy to calculate prompt sizes
  */
-export type SimplePrompt = (input: Record<string, string>) => string;
+export type SimplePrompt = (
+  input: Record<string, string | undefined>,
+) => string;
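+
+// Illustrative only (not part of the public API): because values may now be
+// undefined, implementations can destructure with empty-string defaults, in
+// line with NOTE 2 above. For example:
+//
+//   const qaPrompt: SimplePrompt = ({ context = "", query = "" }) =>
+//     `Context information:\n${context}\nQuery: ${query}\nAnswer:`;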
 
 /*
 DEFAULT_TEXT_QA_PROMPT_TMPL = (
-- 
GitLab