diff --git a/apps/docs/docs/end_to_end.md b/apps/docs/docs/end_to_end.md
index ec77b43fa81563d761126c7303b6b3e27e642102..8c5dc68c197d90d5a83bfacebc6596d3fa0afefa 100644
--- a/apps/docs/docs/end_to_end.md
+++ b/apps/docs/docs/end_to_end.md
@@ -10,14 +10,14 @@ We include several end-to-end examples using LlamaIndex.TS in the repository
 
 Read a file and chat about it with the LLM.
 
-## [List Index](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/listIndex.ts)
-
-Create a list index and query it. This example also use the `LLMRetriever`, which will use the LLM to select the best nodes to use when generating answer.
-
 ## [Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/vectorIndex.ts)
 
 Create a vector index and query it. The vector index will use embeddings to fetch the top k most relevant nodes. By default, the top k is 2.
 
+## [Summary Index](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/summaryIndex.ts)
+
+Create a summary index and query it. This example also uses the `LLMRetriever`, which will use the LLM to select the best nodes to use when generating an answer.
+
 ## [Save / Load an Index](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/storageContext.ts)
 
 Create and load a vector index. Persistance to disk in LlamaIndex.TS happens automatically once a storage context object is created.
@@ -28,7 +28,7 @@ Create a vector index and query it, while also configuring the the `LLM`, the `S
 
 ## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/openai.ts)
 
-Create an OpenAI LLM and directly use it for chat. 
+Create an OpenAI LLM and directly use it for chat.
 
 ## [Llama2 DeuceLLM](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/llamadeuce.ts)
 
@@ -40,4 +40,4 @@ Uses the `SubQuestionQueryEngine`, which breaks complex queries into multiple qu
 
 ## [Low Level Modules](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/lowlevel.ts)
 
-This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and sub-classed to meet your own needs.
\ No newline at end of file
+This example uses several low-level components, which removes the need for an actual query engine. These components can be used anywhere, in any application, or customized and sub-classed to meet your own needs.
diff --git a/apps/docs/docs/modules/high_level/data_index.md b/apps/docs/docs/modules/high_level/data_index.md
index bcb2ef4962bf526fa06253d4a85d4a4f65105a9f..a469817db77d2f371aeaa039530216a8c2531343 100644
--- a/apps/docs/docs/modules/high_level/data_index.md
+++ b/apps/docs/docs/modules/high_level/data_index.md
@@ -6,23 +6,18 @@ sidebar_position: 2
 
 An index is the basic container and organization for your data. LlamaIndex.TS supports two indexes:
 
-- `ListIndex` - will send every `Node` in the index to the LLM in order to generate a response
 - `VectorStoreIndex` - will send the top-k `Node`s to the LLM when generating a response. The default top-k is 2.
+- `SummaryIndex` - will send every `Node` in the index to the LLM in order to generate a response
 
 ```typescript
-import {
-  Document,
-  VectorStoreIndex,
-} from "llamaindex";
+import { Document, VectorStoreIndex } from "llamaindex";
 
 const document = new Document({ text: "test" });
 
-const index = await VectorStoreIndex.fromDocuments(
-  [document]
-);
+const index = await VectorStoreIndex.fromDocuments([document]);
 ```
 
 ## API Reference
 
-- [ListIndex](../../api/classes/ListIndex.md)
-- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
\ No newline at end of file
+- [SummaryIndex](../../api/classes/SummaryIndex.md)
+- [VectorStoreIndex](../../api/classes/VectorStoreIndex.md)
diff --git a/apps/docs/docs/modules/low_level/retriever.md b/apps/docs/docs/modules/low_level/retriever.md
index 39d3851822af5f74f804cc747ddf3b78a43b9a81..9ead0602db47a3c28dbd432421c2934a221cf128 100644
--- a/apps/docs/docs/modules/low_level/retriever.md
+++ b/apps/docs/docs/modules/low_level/retriever.md
@@ -4,10 +4,10 @@ sidebar_position: 5
 
 # Retriever
 
-A retriever in LlamaIndex is what is used to fetch `Node`s from an index using a query string. For example, a `ListIndexRetriever` will fetch all nodes no matter the query. Meanwhile, a `VectorIndexRetriever` will only fetch the top-k most similar nodes.
+A retriever in LlamaIndex is used to fetch `Node`s from an index using a query string. For example, a `VectorIndexRetriever` will only fetch the top-k most similar nodes. Meanwhile, a `SummaryIndexRetriever` will fetch all nodes no matter the query.
 
 ```typescript
-const retriever = vector_index.asRetriever()
+const retriever = vector_index.asRetriever();
 retriever.similarityTopK = 3;
 
 // Fetch nodes!
@@ -16,6 +16,6 @@ const nodesWithScore = await retriever.retrieve("query string");
 
 ## API Reference
 
-- [ListIndexRetriever](../../api/classes/ListIndexRetriever.md)
-- [ListIndexLLMRetriever](../../api/classes/ListIndexLLMRetriever.md)
+- [SummaryIndexRetriever](../../api/classes/SummaryIndexRetriever.md)
+- [SummaryIndexLLMRetriever](../../api/classes/SummaryIndexLLMRetriever.md)
 - [VectorIndexRetriever](../../api/classes/VectorIndexRetriever.md)
diff --git a/packages/core/src/Prompt.ts b/packages/core/src/Prompt.ts
index 86e1a4fd4c8a9d0c96c8c4547567e48f135698f8..262096198e24ff61a74d78e2b79cbca5508487a7 100644
--- a/packages/core/src/Prompt.ts
+++ b/packages/core/src/Prompt.ts
@@ -7,7 +7,9 @@ import { ToolMetadata } from "./Tool";
  * NOTE this is a different interface compared to LlamaIndex Python
  * NOTE 2: we default to empty string to make it easy to calculate prompt sizes
  */
-export type SimplePrompt = (input: Record<string, string>) => string;
+export type SimplePrompt = (
+  input: Record<string, string | undefined>,
+) => string;
 
 /* DEFAULT_TEXT_QA_PROMPT_TMPL = (
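Since the renamed `SummaryIndex` only appears in prose above, here is a minimal end-to-end sketch of the flow the updated docs describe. It assumes `SummaryIndex` mirrors the `fromDocuments`/`asRetriever` surface that the `VectorStoreIndex` snippets in this diff already use:

```typescript
import { Document, SummaryIndex } from "llamaindex";

// Build a summary index over one document. Unlike VectorStoreIndex,
// its retriever returns every node regardless of the query string.
const document = new Document({ text: "test" });
const index = await SummaryIndex.fromDocuments([document]);

const retriever = index.asRetriever();
const nodesWithScore = await retriever.retrieve("query string");
console.log(nodesWithScore.length); // all nodes, not just a top k
```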
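The `Prompt.ts` change widens `SimplePrompt` inputs from `Record<string, string>` to `Record<string, string | undefined>`. A minimal sketch of what that allows, assuming `SimplePrompt` is re-exported from the package root; the template itself is illustrative, not one of the library's built-in prompts:

```typescript
import { SimplePrompt } from "llamaindex";

// Illustrative template: missing or undefined keys fall back to "",
// in line with the "default to empty string" note in the doc comment.
const textQaPrompt: SimplePrompt = ({ context = "", query = "" }) => {
  return `Context information is below.
---------------------
${context}
---------------------
Given the context information, answer the query: ${query}`;
};

// The widened input type lets callers omit keys or pass undefined
// explicitly without a compile error:
console.log(textQaPrompt({ query: "What is a SummaryIndex?" }));
console.log(textQaPrompt({ context: undefined, query: "Still fine?" }));
```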