diff --git a/.changeset/flat-mirrors-dream.md b/.changeset/flat-mirrors-dream.md new file mode 100644 index 0000000000000000000000000000000000000000..a0cb414cba77654f721e02635b0507769b4794a3 --- /dev/null +++ b/.changeset/flat-mirrors-dream.md @@ -0,0 +1,5 @@ +--- +"@llamaindex/doc": patch +--- + +chore: update examples and docs to use unified imports diff --git a/apps/next/src/app/(home)/page.tsx b/apps/next/src/app/(home)/page.tsx index f578a1302a6d8b0cfbbbe6428e637c1fd65d4a80..db5ca76b1e535b51b3c73bbcba1e3e56f1e13ccc 100644 --- a/apps/next/src/app/(home)/page.tsx +++ b/apps/next/src/app/(home)/page.tsx @@ -76,15 +76,19 @@ export default function HomePage() { > <MagicMove code={[ - `import { OpenAI } from "llamaindex"; + `import { OpenAI } from "@llamaindex/openai"; + const llm = new OpenAI(); const response = await llm.complete({ prompt: "How are you?" });`, - `import { OpenAI } from "llamaindex"; + `import { OpenAI } from "@llamaindex/openai"; + const llm = new OpenAI(); const response = await llm.chat({ messages: [{ content: "Tell me a joke.", role: "user" }], });`, - `import { OpenAI, ChatMemoryBuffer } from "llamaindex"; + `import { ChatMemoryBuffer } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; + const llm = new OpenAI({ model: 'gpt4o-turbo' }); const buffer = new ChatMemoryBuffer({ tokenLimit: 128_000, @@ -94,7 +98,9 @@ const response = await llm.chat({ messages: buffer.getMessages(), stream: true });`, - `import { OpenAIAgent, ChatMemoryBuffer } from "llamaindex"; + `import { ChatMemoryBuffer } from "llamaindex"; +import { OpenAIAgent } from "@llamaindex/openai"; + const agent = new OpenAIAgent({ llm, tools: [...myTools] diff --git a/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx b/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx index 38bc4ef72d79bbd68b54816dd0ce8eec85a66862..e9caf43ac15459bbef21b185149b5c1b3dda3ea9 100644 --- a/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx +++ b/apps/next/src/content/docs/llamaindex/examples/agent_gemini.mdx @@ -5,4 +5,24 @@ title: Gemini Agent import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock'; import CodeSourceGemini from "!raw-loader!../../../../../../../examples/gemini/agent.ts"; +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/google + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/google + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/google + ``` +</Tabs> + +## Source + <DynamicCodeBlock lang="ts" code={CodeSourceGemini} /> diff --git a/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx b/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx index 6aa5c5a4fbd7a755d94428eb7de5fc917a5047f0..6295c5f3ca07b74e061dd30c4b6f34e1a9dd5ccc 100644 --- a/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx +++ b/apps/next/src/content/docs/llamaindex/examples/context_aware_agent.mdx @@ -12,9 +12,8 @@ Here's a simple example of how to use the Context-Aware Agent: import { Document, VectorStoreIndex, - OpenAIContextAwareAgent, - OpenAI, } from "llamaindex"; +import { OpenAI, OpenAIContextAwareAgent } from "@llamaindex/openai"; async function createContextAwareAgent() { // Create and index some documents diff --git a/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx 
b/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx index 5fdc6bbaa0073bb7ec1e60f549b7049878c22f08..1adb04ff10fb0796fe460d233c3db4951504bdf6 100644 --- a/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx +++ b/apps/next/src/content/docs/llamaindex/examples/other_llms.mdx @@ -7,14 +7,36 @@ import CodeSource from "!raw-loader!../../../../../../../examples/mistral"; By default LlamaIndex.TS uses OpenAI's LLMs and embedding models, but we support [lots of other LLMs](../modules/llms) including models from Mistral (Mistral, Mixtral), Anthropic (Claude) and Google (Gemini). -If you don't want to use an API at all you can [run a local model](../../examples/local_llm) +If you don't want to use an API at all, you can [run a local model](../../examples/local_llm). + +This example walks you through the process of setting up a Mistral model: + + +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/mistral + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/mistral + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/mistral + ``` +</Tabs> ## Using another LLM You can specify what LLM LlamaIndex.TS will use on the `Settings` object, like this: ```typescript -import { MistralAI, Settings } from "llamaindex"; +import { MistralAI } from "@llamaindex/mistral"; +import { Settings } from "llamaindex"; Settings.llm = new MistralAI({ model: "mistral-tiny", @@ -29,7 +51,8 @@ You can see examples of other APIs we support by checking out "Available LLMs" i A frequent gotcha when trying to use a different API as your LLM is that LlamaIndex will also by default index and embed your data using OpenAI's embeddings. To completely switch away from OpenAI you will need to set your embedding model as well, for example: ```typescript -import { MistralAIEmbedding, Settings } from "llamaindex"; +import { MistralAIEmbedding } from "@llamaindex/mistral"; +import { Settings } from "llamaindex"; Settings.embedModel = new MistralAIEmbedding(); ``` diff --git a/apps/next/src/content/docs/llamaindex/getting_started/index.mdx b/apps/next/src/content/docs/llamaindex/getting_started/index.mdx index 01e71a398f7bb6668b822533df93dfa7cac5468f..d9b58049d6d430a07e7ed1df8773c84f4fb94d6e 100644 --- a/apps/next/src/content/docs/llamaindex/getting_started/index.mdx +++ b/apps/next/src/content/docs/llamaindex/getting_started/index.mdx @@ -5,6 +5,8 @@ description: Install llamaindex by running a single command. import { Tab, Tabs } from "fumadocs-ui/components/tabs"; +To install llamaindex, run the following command: + <Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> ```shell tab="npm" npm install llamaindex @@ -19,6 +21,25 @@ import { Tab, Tabs } from "fumadocs-ui/components/tabs"; ``` </Tabs> +In most cases, you'll also need an LLM package to use LlamaIndex. For example, to use the OpenAI LLM, you would install the following: + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install @llamaindex/openai + ``` + + ```shell tab="yarn" + yarn add @llamaindex/openai + ``` + + ```shell tab="pnpm" + pnpm add @llamaindex/openai + ``` +</Tabs> + +Go to [Using other LLM APIs](/docs/llamaindex/examples/other_llms) to find out how to use other LLMs. + + ## What's next?
<Cards> diff --git a/apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx b/apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx index b25e5ee542f9c628312b72be84f9a4b27e792e65..c9d92f3cbfd016818c8422615ff983a7a8c65d52 100644 --- a/apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx +++ b/apps/next/src/content/docs/llamaindex/getting_started/setup/typescript.mdx @@ -9,7 +9,7 @@ LlamaIndex.TS is written in TypeScript and designed to be used in TypeScript pro We do lots of work on strong typing to make sure you have a great typing experience with LlamaIndex.TS. ```ts twoslash -import { PromptTemplate } from '@llamaindex/core/prompts' +import { PromptTemplate } from 'llamaindex' const promptTemplate = new PromptTemplate({ template: `Context information from multiple sources is below. --------------------- @@ -29,7 +29,7 @@ promptTemplate.format({ ``` ```ts twoslash -import { FunctionTool } from '@llamaindex/core/tools' +import { FunctionTool } from 'llamaindex' import { z } from 'zod' // ---cut-before--- diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx index 860af45739c36e773e47dda8d0fb46b6d2afe3a5..37fc24b2090048fa5ad209fb19f1388059ea8975 100644 --- a/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx +++ b/apps/next/src/content/docs/llamaindex/guide/agents/2_create_agent.mdx @@ -31,7 +31,8 @@ First we'll need to pull in our dependencies. These are: - Dotenv to load our API key from the .env file ```javascript -import { OpenAI, FunctionTool, OpenAIAgent, Settings } from "llamaindex"; +import { FunctionTool, Settings } from "llamaindex"; +import { OpenAI, OpenAIAgent } from "@llamaindex/openai"; import "dotenv/config"; ``` diff --git a/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx b/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx index 465f38299a63c04cf179b95bdf2d9c4526e626b9..b690120fbcbc1b20e34d4372e23e8ad5fef52433 100644 --- a/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx +++ b/apps/next/src/content/docs/llamaindex/guide/agents/4_agentic_rag.mdx @@ -13,22 +13,34 @@ To learn more about RAG, we recommend this [introduction](https://docs.llamainde We're going to start with the same agent we [built in step 1](https://github.com/run-llama/ts-agents/blob/main/1_agent/agent.ts), but make a few changes. You can find the finished version [in the repository](https://github.com/run-llama/ts-agents/blob/main/2_agentic_rag/agent.ts). +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/openai @llamaindex/huggingface + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai @llamaindex/huggingface + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai @llamaindex/huggingface + ``` +</Tabs> + + ### New dependencies We'll be bringing in `SimpleDirectoryReader`, `HuggingFaceEmbedding`, `VectorStoreIndex`, and `QueryEngineTool`, `OpenAIContextAwareAgent` from LlamaIndex.TS, as well as the dependencies we previously used. 
```javascript -import { - OpenAI, - FunctionTool, - OpenAIAgent, - OpenAIContextAwareAgent, - Settings, - SimpleDirectoryReader, - HuggingFaceEmbedding, - VectorStoreIndex, - QueryEngineTool, -} from "llamaindex"; +import { FunctionTool, QueryEngineTool, Settings, SimpleDirectoryReader, VectorStoreIndex } from "llamaindex"; +import { OpenAI, OpenAIAgent } from "@llamaindex/openai"; +import { HuggingFaceEmbedding } from "@llamaindex/huggingface"; ``` ### Add an embedding model diff --git a/apps/next/src/content/docs/llamaindex/guide/loading/index.mdx b/apps/next/src/content/docs/llamaindex/guide/loading/index.mdx index 874788ffedb2d78d83ebb1a840bd42582ff1e71d..91d91868f7d32f2aa62b33aa6863da4067c998c6 100644 --- a/apps/next/src/content/docs/llamaindex/guide/loading/index.mdx +++ b/apps/next/src/content/docs/llamaindex/guide/loading/index.mdx @@ -33,11 +33,11 @@ We offer readers for different file formats. <Tabs groupId="llamaindex-or-readers" items={["llamaindex", "@llamaindex/readers"]} persist> ```ts twoslash tab="llamaindex" - import { CSVReader } from 'llamaindex' - import { PDFReader } from 'llamaindex' - import { JSONReader } from 'llamaindex' - import { MarkdownReader } from 'llamaindex' - import { HTMLReader } from 'llamaindex' + import { CSVReader } from '@llamaindex/readers/csv' + import { PDFReader } from '@llamaindex/readers/pdf' + import { JSONReader } from '@llamaindex/readers/json' + import { MarkdownReader } from '@llamaindex/readers/markdown' + import { HTMLReader } from '@llamaindex/readers/html' // you can find more readers in the documentation ``` @@ -71,7 +71,7 @@ We offer readers for different file formats. ``` ```ts twoslash tab="@llamaindex/readers" - import { SimpleDirectoryReader } from "@llamaindex/readers/directory"; + import { SimpleDirectoryReader } from "llamaindex"; const reader = new SimpleDirectoryReader() const documents = await reader.loadData("./data") diff --git a/apps/next/src/content/docs/llamaindex/guide/loading/node-parser.mdx b/apps/next/src/content/docs/llamaindex/guide/loading/node-parser.mdx index 2c070ed8c12176ba538f66c53473a47ce99b1d8b..51760cd9f6b4fe37b1407354f39eebeebd71dd0c 100644 --- a/apps/next/src/content/docs/llamaindex/guide/loading/node-parser.mdx +++ b/apps/next/src/content/docs/llamaindex/guide/loading/node-parser.mdx @@ -15,7 +15,7 @@ By default, we will use `Settings.nodeParser` to split the document into nodes. ```ts twoslash import { TextFileReader } from '@llamaindex/readers/text' -import { SentenceSplitter } from '@llamaindex/core/node-parser'; +import { SentenceSplitter } from 'llamaindex'; import { Settings } from 'llamaindex'; const nodeParser = new SentenceSplitter(); @@ -28,7 +28,7 @@ Settings.nodeParser = nodeParser; The underlying text splitter will split text by sentences. It can also be used as a standalone module for splitting raw text.
```ts twoslash -import { SentenceSplitter } from "@llamaindex/core/node-parser"; +import { SentenceSplitter } from "llamaindex"; const splitter = new SentenceSplitter({ chunkSize: 1 }); @@ -42,7 +42,7 @@ The `MarkdownNodeParser` is a more advanced `NodeParser` that can handle markdow <Tabs items={["with reader", "with node:fs"]}> ```ts twoslash tab="with reader" - import { MarkdownNodeParser } from "@llamaindex/core/node-parser"; + import { MarkdownNodeParser } from "llamaindex"; import { MarkdownReader } from '@llamaindex/readers/markdown' const reader = new MarkdownReader(); @@ -56,8 +56,7 @@ The `MarkdownNodeParser` is a more advanced `NodeParser` that can handle markdow ```ts twoslash tab="with node:fs" import fs from 'node:fs/promises'; - import { MarkdownNodeParser } from "@llamaindex/core/node-parser"; - import { Document } from '@llamaindex/core/schema'; + import { MarkdownNodeParser, Document } from "llamaindex"; const markdownNodeParser = new MarkdownNodeParser(); const text = await fs.readFile('path/to/file.md', 'utf-8'); diff --git a/apps/next/src/content/docs/llamaindex/integration/vercel.mdx b/apps/next/src/content/docs/llamaindex/integration/vercel.mdx index 7eb946247b5f82dfb6451ff220dcfe9d30ff3329..0640a4edc090f8626580b03aa3a50a053d3ce74a 100644 --- a/apps/next/src/content/docs/llamaindex/integration/vercel.mdx +++ b/apps/next/src/content/docs/llamaindex/integration/vercel.mdx @@ -69,7 +69,7 @@ streamText({ For production deployments, you can use LlamaCloud to store and manage your documents: ```typescript -import { LlamaCloudIndex } from "llamaindex"; +import { LlamaCloudIndex } from "@llamaindex/cloud"; // Create a LlamaCloud index const index = await LlamaCloudIndex.fromDocuments({ diff --git a/apps/next/src/content/docs/llamaindex/modules/data_loaders/json.mdx b/apps/next/src/content/docs/llamaindex/modules/data_loaders/json.mdx index cf69bf73af162005ed723dd467316b7bfd0bcce7..3784b47fd1b9012613292fe4d513318d3060bf96 100644 --- a/apps/next/src/content/docs/llamaindex/modules/data_loaders/json.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/data_loaders/json.mdx @@ -6,10 +6,28 @@ A simple JSON data loader with various options. Either parses the entire string, cleaning it and treat each line as an embedding or performs a recursive depth-first traversal yielding JSON paths. 
Supports streaming of large JSON data using [@discoveryjs/json-ext](https://github.com/discoveryjs/json-ext) +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/readers + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/readers + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/readers + ``` +</Tabs> + ## Usage ```ts -import { JSONReader } from "llamaindex"; +import { JSONReader } from "@llamaindex/readers/json"; const file = "../../PATH/TO/FILE"; const content = new TextEncoder().encode("JSON_CONTENT"); diff --git a/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/images.mdx b/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/images.mdx index b8e099e1803637dcca2d4e9cc0aa146a0d76ff1f..542e63051379e2e6ffe5a32c59008500cb21c731 100644 --- a/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/images.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/images.mdx @@ -4,6 +4,24 @@ title: Image Retrieval LlamaParse `json` mode supports extracting any images found in a page object by using the `getImages` function. They are downloaded to a local folder and can then be sent to a multimodal LLM for further processing. +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/cloud @llamaindex/openai + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/cloud @llamaindex/openai + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/cloud @llamaindex/openai + ``` +</Tabs> + ## Usage We use the `getImages` method to input our array of JSON objects, download the images to a specified folder and get a list of ImageNodes. @@ -19,14 +37,10 @@ const imageDicts = await reader.getImages(jsonObjs, "images"); You can create an index across both text and image nodes by requesting alternative text for the image from a multimodal LLM. ```ts -import { - Document, - ImageNode, - LlamaParseReader, - OpenAI, - VectorStoreIndex, -} from "llamaindex"; -import { createMessageContent } from "llamaindex/synthesizers/utils"; +import { Document, ImageNode, VectorStoreIndex } from "llamaindex"; +import { LlamaParseReader } from "@llamaindex/cloud"; +import { OpenAI } from "@llamaindex/openai"; +import { createMessageContent } from "llamaindex"; const reader = new LlamaParseReader(); async function main() { diff --git a/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/json_mode.mdx b/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/json_mode.mdx index 537d5cba437a13dd5f68b9e2d028ef4e4875ee32..4777f0bb1c24b824d998336d92d7cac63eb31c21 100644 --- a/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/json_mode.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/data_loaders/llama_parse/json_mode.mdx @@ -4,12 +4,32 @@ title: JSON Mode In JSON mode, LlamaParse will return a data structure representing the parsed object. 
+## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/cloud + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/cloud + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/cloud + ``` +</Tabs> + ## Usage For Json mode, you need to use `loadJson`. The `resultType` is automatically set with this method. More information about indexing the results on the next page. ```ts +import { LlamaParseReader } from "@llamaindex/cloud"; + const reader = new LlamaParseReader(); async function main() { // Load the file and return an array of json objects @@ -59,7 +79,8 @@ All Readers share a `loadData` method with `SimpleDirectoryReader` that promises However, a simple work around is to create a new reader class that extends `LlamaParseReader` and adds a new method or overrides `loadData`, wrapping around JSON mode, extracting the required values, and returning a Document object. ```ts -import { LlamaParseReader, Document } from "llamaindex"; +import { Document } from "llamaindex"; +import { LlamaParseReader } from "@llamaindex/cloud"; class LlamaParseReaderWithJson extends LlamaParseReader { // Override the loadData method diff --git a/apps/next/src/content/docs/llamaindex/modules/data_stores/doc_stores/index.mdx b/apps/next/src/content/docs/llamaindex/modules/data_stores/doc_stores/index.mdx index 9546efd3f773995a7a636c01b8ac0d23329779e3..03b97172d158e27abf1e262ae09eb5541339f76b 100644 --- a/apps/next/src/content/docs/llamaindex/modules/data_stores/doc_stores/index.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/data_stores/doc_stores/index.mdx @@ -11,6 +11,38 @@ Document stores contain ingested document chunks, i.e. [Node](/docs/llamaindex/m Check the [LlamaIndexTS Github](https://github.com/run-llama/LlamaIndexTS) for the most up to date overview of integrations. +## Using PostgreSQL as Document Store + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/postgres + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/postgres + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/postgres + ``` +</Tabs> + +You can configure the `schemaName`, `tableName`, `namespace`, and +`connectionString`. If a `connectionString` is not +provided, it will use the environment variables `PGHOST`, `PGUSER`, +`PGPASSWORD`, `PGDATABASE` and `PGPORT`. + +```typescript +import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex"; +import { PostgresDocumentStore } from "@llamaindex/postgres"; + +const storageContext = await storageContextFromDefaults({ + docStore: new PostgresDocumentStore(), +}); +``` + ## API Reference - [BaseDocumentStore](/docs/api/classes/BaseDocumentStore) diff --git a/apps/next/src/content/docs/llamaindex/modules/data_stores/index.mdx b/apps/next/src/content/docs/llamaindex/modules/data_stores/index.mdx index 4282f5cdb40c1ca5dcfb4117b2d60f8239398104..0dbc43ade6489fddaf34d287aac6c3d6502c9494 100644 --- a/apps/next/src/content/docs/llamaindex/modules/data_stores/index.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/data_stores/index.mdx @@ -5,46 +5,23 @@ title: Storage Storage in LlamaIndex.TS works automatically once you've configured a `StorageContext` object. -## Local Storage - -You can configure the `persistDir` and attach it to an index. 
- -```typescript -import { - Document, - VectorStoreIndex, - storageContextFromDefaults, -} from "llamaindex"; - -const storageContext = await storageContextFromDefaults({ - persistDir: "./storage", -}); +By default, a local directory is used for storage. Depending on the storage type (i.e. document stores, index stores, or vector stores), you can configure a different persistence layer. +Most commonly, a vector database is used as the vector store. -const document = new Document({ text: "Test Text" }); -const index = await VectorStoreIndex.fromDocuments([document], { - storageContext, -}); -``` -## PostgreSQL Storage +## Local Storage -You can configure the `schemaName`, `tableName`, `namespace`, and -`connectionString`. If a `connectionString` is not -provided, it will use the environment variables `PGHOST`, `PGUSER`, -`PGPASSWORD`, `PGDATABASE` and `PGPORT`. +You can configure the `persistDir` to define where to store the data locally. ```typescript import { Document, VectorStoreIndex, - PostgresDocumentStore, - PostgresIndexStore, storageContextFromDefaults, } from "llamaindex"; const storageContext = await storageContextFromDefaults({ - docStore: new PostgresDocumentStore(), - indexStore: new PostgresIndexStore(), + persistDir: "./storage", }); const document = new Document({ text: "Test Text" }); diff --git a/apps/next/src/content/docs/llamaindex/modules/data_stores/index_stores/index.mdx b/apps/next/src/content/docs/llamaindex/modules/data_stores/index_stores/index.mdx index 19b063c28a81ecc55f0604a1c7d0a13e263cb788..34effb7fabc925409da3d6baddcb52ba8c52ed9e 100644 --- a/apps/next/src/content/docs/llamaindex/modules/data_stores/index_stores/index.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/data_stores/index_stores/index.mdx @@ -11,6 +11,38 @@ Index stores are underlying storage components that contain metadata(i.e. information created when indexing) about the index itself. Check the [LlamaIndexTS Github](https://github.com/run-llama/LlamaIndexTS) for the most up to date overview of integrations. +## Using PostgreSQL as Index Store + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/postgres + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/postgres + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/postgres + ``` +</Tabs> + +You can configure the `schemaName`, `tableName`, `namespace`, and +`connectionString`. If a `connectionString` is not +provided, it will use the environment variables `PGHOST`, `PGUSER`, +`PGPASSWORD`, `PGDATABASE` and `PGPORT`.
+ +```typescript +import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex"; +import { PostgresIndexStore } from "@llamaindex/postgres"; + +const storageContext = await storageContextFromDefaults({ + indexStore: new PostgresIndexStore(), +}); +``` + ## API Reference - [BaseIndexStore](/docs/api/classes/BaseIndexStore) diff --git a/apps/next/src/content/docs/llamaindex/modules/data_stores/vector_stores/qdrant.mdx b/apps/next/src/content/docs/llamaindex/modules/data_stores/vector_stores/qdrant.mdx index c5a363340cf10bb48804acc60f14f84535ceadb6..5b86585b102a519392db4017755e853845465123 100644 --- a/apps/next/src/content/docs/llamaindex/modules/data_stores/vector_stores/qdrant.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/data_stores/vector_stores/qdrant.mdx @@ -11,11 +11,30 @@ docker pull qdrant/qdrant docker run -p 6333:6333 qdrant/qdrant ``` +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/qdrant + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/qdrant + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/qdrant + ``` +</Tabs> + ## Importing the modules ```ts import fs from "node:fs/promises"; -import { Document, VectorStoreIndex, QdrantVectorStore } from "llamaindex"; +import { Document, VectorStoreIndex } from "llamaindex"; +import { QdrantVectorStore } from "@llamaindex/qdrant"; ``` ## Load the documents @@ -60,7 +79,8 @@ console.log(response.toString()); ```ts import fs from "node:fs/promises"; -import { Document, VectorStoreIndex, QdrantVectorStore } from "llamaindex"; +import { Document, VectorStoreIndex } from "llamaindex"; +import { QdrantVectorStore } from "@llamaindex/qdrant"; async function main() { const path = "node_modules/llamaindex/examples/abramov.txt"; diff --git a/apps/next/src/content/docs/llamaindex/modules/documents_and_nodes/metadata_extraction.mdx b/apps/next/src/content/docs/llamaindex/modules/documents_and_nodes/metadata_extraction.mdx index fbadb14a0ff5684c8f5c3efd49355c2d1e136f50..8f92a8c6b7967d8c04ea3fe6284dff0a93457138 100644 --- a/apps/next/src/content/docs/llamaindex/modules/documents_and_nodes/metadata_extraction.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/documents_and_nodes/metadata_extraction.mdx @@ -14,13 +14,8 @@ Our metadata extractor modules include the following "feature extractors": Then you can chain the `Metadata Extractors` with the `IngestionPipeline` to extract metadata from a set of documents. 
```ts -import { - IngestionPipeline, - TitleExtractor, - QuestionsAnsweredExtractor, - Document, - OpenAI, -} from "llamaindex"; +import { Document, IngestionPipeline, TitleExtractor, QuestionsAnsweredExtractor } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; async function main() { const pipeline = new IngestionPipeline({ diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/deepinfra.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/deepinfra.mdx index 3cfd6c369d433a1f79eb74d7044a2c634166dbaf..6993905a16ead1414770420c248d1da53e374821 100644 --- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/deepinfra.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/deepinfra.mdx @@ -5,13 +5,27 @@ title: DeepInfra To use DeepInfra embeddings, you need to import `DeepInfraEmbedding` from llamaindex. Check out available embedding models [here](https://deepinfra.com/models/embeddings). +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/deepinfra + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/deepinfra + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/deepinfra + ``` +</Tabs> + ```ts -import { - DeepInfraEmbedding, - Settings, - Document, - VectorStoreIndex, -} from "llamaindex"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; +import { DeepInfraEmbedding } from "@llamaindex/deepinfra"; // Update Embed Model Settings.embedModel = new DeepInfraEmbedding(); @@ -33,7 +47,7 @@ By default, DeepInfraEmbedding is using the sentence-transformers/clip-ViT-B-32 For example: ```ts -import { DeepInfraEmbedding } from "llamaindex"; +import { DeepInfraEmbedding } from "@llamaindex/deepinfra"; const model = "intfloat/e5-large-v2"; Settings.embedModel = new DeepInfraEmbedding({ @@ -46,7 +60,8 @@ You can also set the `maxRetries` and `timeout` parameters when initializing `De For example: ```ts -import { DeepInfraEmbedding, Settings } from "llamaindex"; +import { Settings } from "llamaindex"; +import { DeepInfraEmbedding } from "@llamaindex/deepinfra"; const model = "intfloat/e5-large-v2"; const maxRetries = 5; @@ -62,7 +77,7 @@ Settings.embedModel = new DeepInfraEmbedding({ Standalone usage: ```ts -import { DeepInfraEmbedding } from "llamaindex"; +import { DeepInfraEmbedding } from "@llamaindex/deepinfra"; import { config } from "dotenv"; // For standalone usage, you need to configure DEEPINFRA_API_TOKEN in .env file config(); diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/gemini.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/gemini.mdx index 074482cc4646a18e3170b6e14dda0ed739a9ed9d..36204a657e68cd9f11285fac527325fb0596c7ef 100644 --- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/gemini.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/gemini.mdx @@ -2,10 +2,29 @@ title: Gemini --- -To use Gemini embeddings, you need to import `GeminiEmbedding` from `llamaindex`. +To use Gemini embeddings, you need to import `GeminiEmbedding` from `@llamaindex/google`. 
+ +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/google + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/google + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/google + ``` +</Tabs> ```ts -import { GeminiEmbedding, Settings } from "llamaindex"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; +import { GeminiEmbedding, GEMINI_MODEL } from "@llamaindex/google"; // Update Embed Model Settings.embedModel = new GeminiEmbedding(); @@ -27,7 +46,7 @@ Per default, `GeminiEmbedding` is using the `gemini-pro` model. You can change t For example: ```ts -import { GEMINI_MODEL, GeminiEmbedding } from "llamaindex"; +import { GEMINI_MODEL, GeminiEmbedding } from "@llamaindex/google"; Settings.embedModel = new GeminiEmbedding({ model: GEMINI_MODEL.GEMINI_PRO_LATEST, diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/huggingface.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/huggingface.mdx index 7b37de3a682d11641ccf6afec070eec7f0bd379f..5fe36edf72a102b19d4ad5f22d11ce04e9da779a 100644 --- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/huggingface.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/huggingface.mdx @@ -2,10 +2,29 @@ title: HuggingFace --- -To use HuggingFace embeddings, you need to import `HuggingFaceEmbedding` from `llamaindex`. +To use HuggingFace embeddings, you need to import `HuggingFaceEmbedding` from `@llamaindex/huggingface`. + +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/huggingface + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/huggingface + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/huggingface + ``` +</Tabs> ```ts -import { HuggingFaceEmbedding, Settings } from "llamaindex"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; +import { HuggingFaceEmbedding } from "@llamaindex/huggingface"; // Update Embed Model Settings.embedModel = new HuggingFaceEmbedding(); @@ -29,6 +48,8 @@ If you're not using a quantized model, set the `quantized` parameter to `false`. For example, to use the not quantized `BAAI/bge-small-en-v1.5` model, you can use the following code: ```ts +import { HuggingFaceEmbedding } from "@llamaindex/huggingface"; + Settings.embedModel = new HuggingFaceEmbedding({ modelType: "BAAI/bge-small-en-v1.5", quantized: false, diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mistral.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mistral.mdx index b7722640e00212dc9bf4141ee39e5f666825c884..70b94b9290dc5ee038a7ab2d2cae771350b4f609 100644 --- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mistral.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mistral.mdx @@ -2,10 +2,29 @@ title: MistralAI --- -To use MistralAI embeddings, you need to import `MistralAIEmbedding` from `llamaindex`. +To use MistralAI embeddings, you need to import `MistralAIEmbedding` from `@llamaindex/mistral`. 
+ +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/mistral + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/mistral + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/mistral + ``` +</Tabs> ```ts -import { MistralAIEmbedding, Settings } from "llamaindex"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; +import { MistralAIEmbedding } from "@llamaindex/mistral"; // Update Embed Model Settings.embedModel = new MistralAIEmbedding({ diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mixedbreadai.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mixedbreadai.mdx index f8395b58fbff69978331139c9ac6a57d603b177e..5983aeacd41987c18ef03307d67cdeef3cc3020f 100644 --- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mixedbreadai.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/mixedbreadai.mdx @@ -14,16 +14,28 @@ To find out more about the latest features, updates, and available models, visit ## Setup -First, you will need to install the `llamaindex` package. +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/mixedbread + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/mixedbread + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/mixedbread + ``` +</Tabs> -```bash -pnpm install llamaindex -``` Next, sign up for an API key at [mixedbread.ai](https://mixedbread.ai/). Once you have your API key, you can import the necessary modules and create a new instance of the `MixedbreadAIEmbeddings` class. ```ts -import { MixedbreadAIEmbeddings, Document, Settings } from "llamaindex"; +import { MixedbreadAIEmbeddings } from "@llamaindex/mixedbread"; +import { Document, Settings } from "llamaindex"; ``` ## Usage with LlamaIndex diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/ollama.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/ollama.mdx index b7e7eeb916cd6a2a99ed6de84744fbb39fad0a63..499536d35f42d6cffaa5225bc3e3d2e8dd0515a7 100644 --- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/ollama.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/ollama.mdx @@ -2,7 +2,7 @@ title: Ollama --- -To use Ollama embeddings, you need to import `OllamaEmbedding` from `llamaindex`. +To use Ollama embeddings, you need to import `OllamaEmbedding` from `@llamaindex/ollama`. Note that you need to pull the embedding model first before using it. 
@@ -12,8 +12,27 @@ In the example below, we're using the [`nomic-embed-text`](https://ollama.com/li ollama pull nomic-embed-text ``` +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/ollama + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/ollama + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/ollama + ``` +</Tabs> + ```ts -import { OllamaEmbedding, Settings } from "llamaindex"; +import { OllamaEmbedding } from "@llamaindex/ollama"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; Settings.embedModel = new OllamaEmbedding({ model: "nomic-embed-text" }); diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/openai.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/openai.mdx index cf1ef52bad6d5b838da62f94468b66f1a8119bcc..d6fefb7bfdc9505bc0bf3388dae0bb483092d945 100644 --- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/openai.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/openai.mdx @@ -2,10 +2,29 @@ title: OpenAI --- -To use OpenAI embeddings, you need to import `OpenAIEmbedding` from `llamaindex`. +To use OpenAI embeddings, you need to import `OpenAIEmbedding` from `@llamaindex/openai`. + +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/openai + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai + ``` +</Tabs> ```ts -import { OpenAIEmbedding, Settings } from "llamaindex"; +import { OpenAIEmbedding } from "@llamaindex/openai"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; Settings.embedModel = new OpenAIEmbedding(); diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx index d1a91100716d83e3852024f87d8028abc86b11b4..62649cc01187eaf6f5407676ae018b017cc20395 100644 --- a/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/index.mdx @@ -6,8 +6,27 @@ The embedding model in LlamaIndex is responsible for creating numerical represen This can be explicitly updated through `Settings` +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/openai + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai + ``` +</Tabs> + ```typescript -import { OpenAIEmbedding, Settings } from "llamaindex"; +import { OpenAIEmbedding } from "@llamaindex/openai"; +import { Settings } from "llamaindex"; Settings.embedModel = new OpenAIEmbedding({ model: "text-embedding-ada-002", diff --git a/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/correctness.mdx b/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/correctness.mdx index 2e835940949cdfc4d06283c1252ea2923ac522cd..50cb3c856ad3b37a4858c67fe57b4e2ded5ad6c5 100644 --- 
a/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/correctness.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/correctness.mdx @@ -10,9 +10,21 @@ This is useful for measuring if the response was correct. The evaluator returns Firstly, you need to install the package: -```bash -pnpm i llamaindex -``` +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/openai + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai + ``` +</Tabs> Set the OpenAI API key: @@ -23,7 +35,8 @@ export OPENAI_API_KEY=your-api-key Import the required modules: ```ts -import { CorrectnessEvaluator, OpenAI, Settings, Response } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { CorrectnessEvaluator, Settings, Response } from "llamaindex"; ``` Let's setup gpt-4 for better results: diff --git a/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/faithfulness.mdx b/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/faithfulness.mdx index bf87bad80e2836df7717070b29b50b579adc9e44..a875b9c39b203e1e49d9b6e5c643b2f913106701 100644 --- a/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/faithfulness.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/faithfulness.mdx @@ -12,9 +12,22 @@ This is useful for measuring if the response was hallucinated. The evaluator ret Firstly, you need to install the package: -```bash -pnpm i llamaindex -``` +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/openai + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai + ``` +</Tabs> + Set the OpenAI API key: @@ -25,12 +38,12 @@ export OPENAI_API_KEY=your-api-key Import the required modules: ```ts +import { OpenAI } from "@llamaindex/openai"; import { Document, FaithfulnessEvaluator, - OpenAI, - VectorStoreIndex, Settings, + VectorStoreIndex, } from "llamaindex"; ``` diff --git a/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/relevancy.mdx b/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/relevancy.mdx index 211998fb26c9b51445d84a83e73b61d5229a6b3a..ec07979d6da29683a556347f5174404003d397a8 100644 --- a/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/relevancy.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/evaluation/modules/relevancy.mdx @@ -10,9 +10,22 @@ It is useful for measuring if the response was relevant to the query. 
The evalua Firstly, you need to install the package: -```bash -pnpm i llamaindex -``` +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/openai + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai + ``` +</Tabs> + Set the OpenAI API key: @@ -23,11 +36,11 @@ export OPENAI_API_KEY=your-api-key Import the required modules: ```ts +import { OpenAI } from "@llamaindex/openai"; import { + Document, RelevancyEvaluator, - OpenAI, Settings, - Document, VectorStoreIndex, } from "llamaindex"; ``` diff --git a/apps/next/src/content/docs/llamaindex/modules/ingestion_pipeline/index.mdx b/apps/next/src/content/docs/llamaindex/modules/ingestion_pipeline/index.mdx index 045e7c5cf044b4b1f8007062305bab2278542f56..74c2eed160059c3065b77cb0d86901a9863222ee 100644 --- a/apps/next/src/content/docs/llamaindex/modules/ingestion_pipeline/index.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/ingestion_pipeline/index.mdx @@ -5,18 +5,35 @@ title: Ingestion Pipeline An `IngestionPipeline` uses a concept of `Transformations` that are applied to input data. These `Transformations` are applied to your input data, and the resulting nodes are either returned or inserted into a vector database (if given). +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/openai @llamaindex/qdrant + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai @llamaindex/qdrant + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai @llamaindex/qdrant + ``` +</Tabs> + ## Usage Pattern The simplest usage is to instantiate an IngestionPipeline like so: ```ts import fs from "node:fs/promises"; - +import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai"; import { Document, IngestionPipeline, MetadataMode, - OpenAIEmbedding, TitleExtractor, SentenceSplitter, } from "llamaindex"; @@ -58,14 +75,14 @@ Then, you can construct an index from that vector store later on. 
```ts import fs from "node:fs/promises"; +import { OpenAIEmbedding } from "@llamaindex/openai"; +import { QdrantVectorStore } from "@llamaindex/qdrant"; import { Document, IngestionPipeline, MetadataMode, - OpenAIEmbedding, TitleExtractor, SentenceSplitter, - QdrantVectorStore, VectorStoreIndex, } from "llamaindex"; diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/anthropic.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/anthropic.mdx index 7b508fa476cabf85713f7a59dc84ca3942f2289e..b96aca404b3383c8b3221afd135c289d7cf85e7f 100644 --- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/anthropic.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/anthropic.mdx @@ -2,10 +2,29 @@ title: Anthropic --- +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/anthropic + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/anthropic + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/anthropic + ``` +</Tabs> + ## Usage ```ts -import { Anthropic, Settings } from "llamaindex"; +import { Settings } from "llamaindex"; +import { Anthropic } from "@llamaindex/anthropic"; Settings.llm = new Anthropic({ apiKey: "<YOUR_API_KEY>", @@ -37,7 +56,8 @@ const results = await queryEngine.query({ ## Full Example ```ts -import { Anthropic, Document, VectorStoreIndex, Settings } from "llamaindex"; +import { Document, VectorStoreIndex, Settings } from "llamaindex"; +import { Anthropic } from "@llamaindex/anthropic"; Settings.llm = new Anthropic({ apiKey: "<YOUR_API_KEY>", diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/azure.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/azure.mdx index e43cf39f1cf3c30328450ea2063d759b4c8df94d..3c0231841b4cd502b2751194bd12ba0b0c6f80c8 100644 --- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/azure.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/azure.mdx @@ -14,10 +14,29 @@ export AZURE_OPENAI_ENDPOINT="<YOUR ENDPOINT, see https://learn.microsoft.com/en export AZURE_OPENAI_DEPLOYMENT="gpt-4" # or some other deployment name ``` +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/openai + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai + ``` +</Tabs> + ## Usage ```ts -import { OpenAI, Settings } from "llamaindex"; +import { Settings } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; Settings.llm = new OpenAI({ model: "gpt-4", temperature: 0 }); ``` @@ -47,7 +66,8 @@ const results = await queryEngine.query({ ## Full Example ```ts -import { OpenAI, Document, VectorStoreIndex, Settings } from "llamaindex"; +import { Document, VectorStoreIndex, Settings } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; Settings.llm = new OpenAI({ model: "gpt-4", temperature: 0 }); diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/bedrock.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/bedrock.mdx index ba9f52c14db8682bb0aa2145205bddd258994828..5c926550d899c01b6526b70e38e9287e1212a893 100644 --- 
a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/bedrock.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/bedrock.mdx @@ -2,6 +2,24 @@ title: Bedrock --- +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/community + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/community + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/community + ``` +</Tabs> + ## Usage ```ts diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepinfra.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepinfra.mdx index 7a82ffeeecb724875b1d08ea5cb339a375ba8a2b..ecd5f10d731f6f12a1dc930cfc2c2c017a47d7c5 100644 --- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepinfra.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepinfra.mdx @@ -4,8 +4,27 @@ title: DeepInfra Check out available LLMs [here](https://deepinfra.com/models/text-generation). +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/deepinfra + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/deepinfra + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/deepinfra + ``` +</Tabs> + ```ts -import { DeepInfra, Settings } from "llamaindex"; +import { DeepInfra } from "@llamaindex/deepinfra"; +import { Settings } from "llamaindex"; // Get the API key from `DEEPINFRA_API_TOKEN` environment variable import { config } from "dotenv"; @@ -28,6 +47,8 @@ export DEEPINFRA_API_TOKEN="<YOUR_API_KEY>" For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index. 
```ts +import { Document, VectorStoreIndex } from "llamaindex"; + const document = new Document({ text: essay, id_: "essay" }); const index = await VectorStoreIndex.fromDocuments([document]); @@ -48,7 +69,8 @@ const results = await queryEngine.query({ ## Full Example ```ts -import { DeepInfra, Document, VectorStoreIndex, Settings } from "llamaindex"; +import { DeepInfra } from "@llamaindex/deepinfra"; +import { Document, VectorStoreIndex, Settings } from "llamaindex"; // Use custom LLM const model = "meta-llama/Meta-Llama-3-8B-Instruct"; diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/gemini.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/gemini.mdx index 0f981b67869bbef5278ff80dbe6b490dd97cf9f9..8a77cbca9109389a88a5ffd0daaab84c53d76274 100644 --- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/gemini.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/gemini.mdx @@ -2,10 +2,29 @@ title: Gemini --- +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/google + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/google + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/google + ``` +</Tabs> + ## Usage ```ts -import { Gemini, Settings, GEMINI_MODEL } from "llamaindex"; +import { Gemini, GEMINI_MODEL } from "@llamaindex/google"; +import { Settings } from "llamaindex"; Settings.llm = new Gemini({ model: GEMINI_MODEL.GEMINI_PRO, @@ -19,7 +38,7 @@ To use Gemini via Vertex AI you can use `GeminiVertexSession`. GeminiVertexSession accepts the env variables: `GOOGLE_VERTEX_LOCATION` and `GOOGLE_VERTEX_PROJECT` ```ts -import { Gemini, GEMINI_MODEL, GeminiVertexSession } from "llamaindex"; +import { Gemini, GEMINI_MODEL, GeminiVertexSession } from "@llamaindex/google"; const gemini = new Gemini({ model: GEMINI_MODEL.GEMINI_PRO, @@ -47,6 +66,8 @@ To authenticate for production you'll have to use a [service account](https://cl For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index. 
```ts +import { Document, VectorStoreIndex } from "llamaindex"; + const document = new Document({ text: essay, id_: "essay" }); const index = await VectorStoreIndex.fromDocuments([document]); @@ -67,13 +88,8 @@ const results = await queryEngine.query({ ## Full Example ```ts -import { - Gemini, - Document, - VectorStoreIndex, - Settings, - GEMINI_MODEL, -} from "llamaindex"; +import { Gemini, GEMINI_MODEL } from "@llamaindex/google"; +import { Document, VectorStoreIndex, Settings } from "llamaindex"; Settings.llm = new Gemini({ model: GEMINI_MODEL.GEMINI_PRO, diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx index b2bcfceddf1695279ce9e45edf1932760393a6e7..a570a1fef711bb27710cdc35b25b3d2b3f9009b7 100644 --- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/groq.mdx @@ -5,6 +5,24 @@ title: Groq import { DynamicCodeBlock } from 'fumadocs-ui/components/dynamic-codeblock'; import CodeSource from "!raw-loader!../../../../../../../../../examples/groq.ts"; +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/groq + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/groq + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/groq + ``` +</Tabs> + ## Usage First, create an API key at the [Groq Console](https://console.groq.com/keys). Then save it in your environment: @@ -16,7 +34,8 @@ export GROQ_API_KEY=<your-api-key> Then initialize the Groq module. ```ts -import { Groq, Settings } from "llamaindex"; +import { Groq } from "@llamaindex/groq"; +import { Settings } from "llamaindex"; Settings.llm = new Groq({ // If you do not wish to set your API key in the environment, you may @@ -30,6 +49,8 @@ Settings.llm = new Groq({ For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
```ts +import { Document, VectorStoreIndex } from "llamaindex"; + const document = new Document({ text: essay, id_: "essay" }); const index = await VectorStoreIndex.fromDocuments([document]); diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/llama2.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/llama2.mdx index 47202a66d5d8c2279f672dc98f6bbd7799e11413..ac74c4ebd29bc32ecd22b5bbf0e6c1ca10d8e02e 100644 --- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/llama2.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/llama2.mdx @@ -2,10 +2,29 @@ title: LLama2 --- +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/replicate + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/replicate + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/replicate + ``` +</Tabs> + ## Usage ```ts -import { Ollama, Settings, DeuceChatStrategy } from "llamaindex"; +import { LlamaDeuce, DeuceChatStrategy } from "@llamaindex/replicate"; +import { Document, VectorStoreIndex, Settings } from "llamaindex"; Settings.llm = new LlamaDeuce({ chatStrategy: DeuceChatStrategy.META }); ``` @@ -13,12 +32,8 @@ Settings.llm = new LlamaDeuce({ chatStrategy: DeuceChatStrategy.META }); ## Usage with Replication ```ts -import { - Ollama, - ReplicateSession, - Settings, - DeuceChatStrategy, -} from "llamaindex"; +import { Settings } from "llamaindex"; +import { LlamaDeuce, DeuceChatStrategy, ReplicateSession } from "@llamaindex/replicate"; const replicateSession = new ReplicateSession({ replicateKey, @@ -55,13 +70,8 @@ const results = await queryEngine.query({ ## Full Example ```ts -import { - LlamaDeuce, - Document, - VectorStoreIndex, - Settings, - DeuceChatStrategy, -} from "llamaindex"; +import { LlamaDeuce, DeuceChatStrategy } from "@llamaindex/replicate"; +import { Document, VectorStoreIndex, Settings } from "llamaindex"; // Use the LlamaDeuce LLM Settings.llm = new LlamaDeuce({ chatStrategy: DeuceChatStrategy.META }); diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/mistral.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/mistral.mdx index ef25de1e7b0ce0e3d0bdb4136ae4e5c00af5faab..1ec50677b89c9b819a54c727566829cc179bd9d0 100644 --- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/mistral.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/mistral.mdx @@ -2,10 +2,29 @@ title: Mistral --- +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/mistral + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/mistral + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/mistral + ``` +</Tabs> + ## Usage ```ts -import { MistralAI, Settings } from "llamaindex"; +import { MistralAI } from "@llamaindex/mistral"; +import { Settings } from "llamaindex"; Settings.llm = new MistralAI({ model: "mistral-tiny", @@ -18,6 +37,8 @@ Settings.llm = new MistralAI({ For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index. 
```ts +import { Document, VectorStoreIndex } from "llamaindex"; + const document = new Document({ text: essay, id_: "essay" }); const index = await VectorStoreIndex.fromDocuments([document]); @@ -38,7 +59,8 @@ const results = await queryEngine.query({ ## Full Example ```ts -import { MistralAI, Document, VectorStoreIndex, Settings } from "llamaindex"; +import { MistralAI } from "@llamaindex/mistral"; +import { Document, VectorStoreIndex, Settings } from "llamaindex"; // Use the MistralAI LLM Settings.llm = new MistralAI({ model: "mistral-tiny" }); diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/ollama.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/ollama.mdx index 81e1eddbec6632e7b5d12383f0b1bd65c4ea3153..ed3161f2446ddde0f7bf5b4a15e81f5e063d143a 100644 --- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/ollama.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/ollama.mdx @@ -2,10 +2,30 @@ title: Ollama --- +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/ollama + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/ollama + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/ollama + ``` +</Tabs> + + ## Usage ```ts -import { Ollama, Settings } from "llamaindex"; +import { Ollama } from "@llamaindex/ollama"; +import { Settings } from "llamaindex"; + +// create the Ollama instance first (model name is illustrative) +const ollamaLLM = new Ollama({ model: "llama3" }); Settings.llm = ollamaLLM; Settings.embedModel = ollamaLLM; @@ -16,6 +36,8 @@ Settings.embedModel = ollamaLLM; For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index. ```ts +import { Document, VectorStoreIndex } from "llamaindex"; + const document = new Document({ text: essay, id_: "essay" }); const index = await VectorStoreIndex.fromDocuments([document]); @@ -36,7 +58,8 @@ const results = await queryEngine.query({ ## Full Example ```ts -import { Ollama, Document, VectorStoreIndex, Settings } from "llamaindex"; +import { Ollama } from "@llamaindex/ollama"; +import { Document, VectorStoreIndex, Settings } from "llamaindex"; import fs from "fs/promises"; diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/openai.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/openai.mdx index 7211bd6c643a30150feaf374c23e2de80f0755bf..9afdba1ccf24bba9a4384bbf5192c913a0de9402 100644 --- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/openai.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/openai.mdx @@ -2,8 +2,28 @@ title: OpenAI --- +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/openai + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai + ``` +</Tabs> + + ```ts -import { OpenAI, Settings } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { Settings } from "llamaindex"; Settings.llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0, apiKey: "<YOUR_API_KEY>" }); ``` @@ -19,6 +39,8 @@ export OPENAI_API_KEY="<YOUR_API_KEY>" For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
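Since OpenAI is also the default embedding provider, the same scoped package covers both roles; a minimal sketch (assuming `OPENAI_API_KEY` is exported as above) might be:

```ts
import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
import { Settings } from "llamaindex";

// Configure both the LLM and the embedding model from one package
Settings.llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
Settings.embedModel = new OpenAIEmbedding();

const response = await Settings.llm.complete({ prompt: "Say hi in one word." });
console.log(response.text);
```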
```ts +import { Document, VectorStoreIndex } from "llamaindex"; + const document = new Document({ text: essay, id_: "essay" }); const index = await VectorStoreIndex.fromDocuments([document]); @@ -39,7 +61,8 @@ const results = await queryEngine.query({ ## Full Example ```ts -import { OpenAI, Document, VectorStoreIndex, Settings } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; // Use the OpenAI LLM Settings.llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 }); diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/portkey.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/portkey.mdx index b199afcbfa77a12e8138612ba2e67914ad942a80..efd003e4f2e2dce75f48795d99e9f3aa2c83d1c8 100644 --- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/portkey.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/portkey.mdx @@ -2,10 +2,30 @@ title: Portkey LLM --- +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/portkey-ai + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/portkey-ai + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/portkey-ai + ``` +</Tabs> + + ## Usage ```ts -import { Portkey, Settings } from "llamaindex"; +import { Portkey } from "@llamaindex/portkey-ai"; +import { Settings } from "llamaindex"; Settings.llm = new Portkey({ apiKey: "<YOUR_API_KEY>", @@ -17,6 +37,8 @@ Settings.llm = new Portkey({ For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index. ```ts +import { Document, VectorStoreIndex } from "llamaindex"; + const document = new Document({ text: essay, id_: "essay" }); const index = await VectorStoreIndex.fromDocuments([document]); @@ -37,7 +59,8 @@ const results = await queryEngine.query({ ## Full Example ```ts -import { Portkey, Document, VectorStoreIndex, Settings } from "llamaindex"; +import { Portkey } from "@llamaindex/portkey-ai"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; // Use the Portkey LLM Settings.llm = new Portkey({ diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx index 6e395bc3a2ac07112cbfa3710936a2e616b79220..65cc58c416341d17edb0a11cac786642c6bfb243 100644 --- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx @@ -2,10 +2,28 @@ title: Together LLM --- +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex + ``` + + ```shell tab="yarn" + yarn add llamaindex + ``` + + ```shell tab="pnpm" + pnpm add llamaindex + ``` +</Tabs> + ## Usage ```ts -import { TogetherLLM, Settings } from "llamaindex"; +import { Settings, TogetherLLM } from "llamaindex"; Settings.llm = new TogetherLLM({ apiKey: "<YOUR_API_KEY>", @@ -17,6 +35,8 @@ Settings.llm = new TogetherLLM({ For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index. 
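To verify the Together setup before indexing, a minimal sketch (replace the API key placeholder; the model name is illustrative, as Together hosts many open models):

```ts
import { Settings, TogetherLLM } from "llamaindex";

Settings.llm = new TogetherLLM({
  apiKey: "<YOUR_API_KEY>", // your Together API key
  model: "mistralai/Mixtral-8x7B-Instruct-v0.1", // illustrative model name
});

const response = await Settings.llm.complete({ prompt: "Hello!" });
console.log(response.text);
```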
```ts +import { Document, VectorStoreIndex } from "llamaindex"; + const document = new Document({ text: essay, id_: "essay" }); const index = await VectorStoreIndex.fromDocuments([document]); @@ -37,7 +57,8 @@ const results = await queryEngine.query({ ## Full Example ```ts -import { TogetherLLM, Document, VectorStoreIndex, Settings } from "llamaindex"; +import { Document, Settings, TogetherLLM, VectorStoreIndex } from "llamaindex"; Settings.llm = new TogetherLLM({ apiKey: "<YOUR_API_KEY>", diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx index 7fecb26f9e15164901a31ee75eb71128156e9057..1ee9b636dca4952027c782692b3cf246a42a62d7 100644 --- a/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/llms/index.mdx @@ -2,12 +2,31 @@ title: Large Language Models (LLMs) --- -The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`. +The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-4o`. The LLM can be explicitly updated through `Settings`. +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/openai + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai + ``` +</Tabs> + ```typescript -import { OpenAI, Settings } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { Settings } from "llamaindex"; Settings.llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 }); ``` diff --git a/apps/next/src/content/docs/llamaindex/modules/node_parser.mdx b/apps/next/src/content/docs/llamaindex/modules/node_parser.mdx index 5f9af92c7e0bd269944c17019f70bbd99e982ee6..2ce4d57f1540b45857dfd9d94d3cd12c1436415f 100644 --- a/apps/next/src/content/docs/llamaindex/modules/node_parser.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/node_parser.mdx @@ -5,7 +5,8 @@ title: NodeParser The `NodeParser` in LlamaIndex is responsible for splitting `Document` objects into more manageable `Node` objects. When you call `.fromDocuments()`, the `NodeParser` from the `Settings` is used to do this automatically for you. Alternatively, you can use it to split documents ahead of time.
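For instance, splitting ahead of time can be as small as the following sketch (the chunk size is illustrative); the doc's own snippet below shows the same parser with its defaults:

```ts
import { Document, SentenceSplitter } from "llamaindex";

const splitter = new SentenceSplitter({ chunkSize: 512 }); // illustrative size

const nodes = splitter.getNodesFromDocuments([
  new Document({ text: "First sentence. Second sentence. Third sentence." }),
]);
console.log(nodes.length);
```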
```typescript import { Document, SentenceSplitter } from "llamaindex"; const nodeParser = new SentenceSplitter(); @@ -30,6 +31,7 @@ The `MarkdownNodeParser` is a more advanced `NodeParser` that can handle markdow ```typescript -import { MarkdownNodeParser } from "llamaindex"; +import { Document, MarkdownNodeParser } from "llamaindex"; const nodeParser = new MarkdownNodeParser(); diff --git a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/cohere_reranker.mdx b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/cohere_reranker.mdx index a64cea55cabde52d802f0a5204324f4cadff5b38..b121c4eb9066d5aee2bf8a2de46a0ae2ea558dc6 100644 --- a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/cohere_reranker.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/cohere_reranker.mdx @@ -8,20 +8,28 @@ The Cohere Reranker is a postprocessor that uses the Cohere API to rerank the re Firstly, you will need to install the necessary packages. -```bash -pnpm install llamaindex -``` +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/cohere @llamaindex/openai + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/cohere @llamaindex/openai + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/cohere @llamaindex/openai + ``` +</Tabs> Now, you will need to sign up for an API key at [Cohere](https://cohere.ai/). Once you have your API key you can import the necessary modules and create a new instance of the `CohereRerank` class. ```ts -import { - CohereRerank, - Document, - OpenAI, - VectorStoreIndex, - Settings, -} from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { CohereRerank } from "@llamaindex/cohere"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; ``` ## Load and index documents diff --git a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/index.mdx b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/index.mdx index 8a6cd35f68cc2df03c39320cf15fc66765ad2723..8ef8c7969cad2ba8280ec2d2db26c03259b1824c 100644 --- a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/index.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/index.mdx @@ -2,6 +2,24 @@ title: Node Postprocessors --- +## Installation + +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/cohere @llamaindex/openai + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/cohere @llamaindex/openai + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/cohere @llamaindex/openai + ``` +</Tabs> + ## Concept Node postprocessors are a set of modules that take a set of nodes, and apply some kind of transformation or filtering before returning them.
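As a concept sketch, a postprocessor can also be applied directly to a list of scored nodes, outside any query engine (the cutoff value is illustrative):

```ts
import { NodeWithScore, SimilarityPostprocessor, TextNode } from "llamaindex";

const postprocessor = new SimilarityPostprocessor({ similarityCutoff: 0.7 });

const nodes: NodeWithScore[] = [
  { node: new TextNode({ text: "closely related passage" }), score: 0.92 },
  { node: new TextNode({ text: "barely related passage" }), score: 0.31 },
];

// Nodes scoring below the cutoff are filtered out
const filtered = await postprocessor.postprocessNodes(nodes);
console.log(filtered.length); // 1
```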
@@ -15,12 +33,8 @@ LlamaIndex offers several node postprocessors for immediate use, while also prov An example of using a node postprocessor is below: ```ts -import { - Node, - NodeWithScore, - SimilarityPostprocessor, - CohereRerank, -} from "llamaindex"; +import { CohereRerank } from "@llamaindex/cohere"; +import { Node, NodeWithScore, SimilarityPostprocessor, TextNode } from "llamaindex"; const nodes: NodeWithScore[] = [ { @@ -60,7 +74,9 @@ Most commonly, node-postprocessors will be used in a query engine, where they ar ### Using Node Postprocessors in a Query Engine ```ts -import { Node, NodeWithScore, SimilarityPostprocessor, CohereRerank, Settings } from "llamaindex"; +import { CohereRerank } from "@llamaindex/cohere"; +import { OpenAI } from "@llamaindex/openai"; +import { Document, Node, NodeWithScore, SimilarityPostprocessor, Settings, TextNode } from "llamaindex"; // Use OpenAI LLM Settings.llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0.1 }); @@ -78,9 +94,9 @@ const nodes: NodeWithScore[] = [ // cohere rerank: rerank nodes given query using trained model const reranker = new CohereRerank({ - apiKey: "<COHERE_API_KEY>, + apiKey: "<COHERE_API_KEY>", topN: 2, -}) +}); const document = new Document({ text: "essay", id_: "essay" }); diff --git a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/jinaai_reranker.mdx b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/jinaai_reranker.mdx index a43145668cc69dfa795618fceda32e98801ff3c9..4856157b06df92d33c27cd29ea31d590cbbd5feb 100644 --- a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/jinaai_reranker.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/jinaai_reranker.mdx @@ -8,20 +8,28 @@ The Jina AI Reranker is a postprocessor that uses the Jina AI Reranker API to re Firstly, you will need to install the necessary packages. -```bash -pnpm install llamaindex -``` +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/openai + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai + ``` +</Tabs> + Now, you will need to sign up for an API key at [Jina AI](https://jina.ai/reranker). Once you have your API key you can import the necessary modules and create a new instance of the `JinaAIReranker` class. ```ts -import { - JinaAIReranker, - Document, - OpenAI, - VectorStoreIndex, - Settings, -} from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { Document, Settings, VectorStoreIndex, JinaAIReranker } from "llamaindex"; ``` ## Load and index documents diff --git a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/mixedbreadiai_reranker.mdx b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/mixedbreadiai_reranker.mdx index 1a13a7ccbbd894dceba6d0f41ec2da06141c6c5d..2561812f8a8dfeea5cbc84a54ad9d5a0ccf345f7 100644 --- a/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/mixedbreadiai_reranker.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/node_postprocessors/mixedbreadiai_reranker.mdx @@ -17,20 +17,33 @@ To find out more about the latest features and updates, visit the [mixedbread.ai First, you will need to install the necessary packages.
-```bash -pnpm install llamaindex -``` +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/openai @llamaindex/mixedbread + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai @llamaindex/mixedbread + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai @llamaindex/mixedbread + ``` +</Tabs> + Next, sign up for an API key at [mixedbread.ai](https://mixedbread.ai/). Once you have your API key, you can import the necessary modules and create a new instance of the `MixedbreadAIReranker` class. ```ts import { - MixedbreadAIReranker, Document, - OpenAI, VectorStoreIndex, Settings, } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { MixedbreadAIReranker } from "@llamaindex/mixedbread"; ``` ## Usage with LlamaIndex diff --git a/apps/next/src/content/docs/llamaindex/modules/query_engines/metadata_filtering.mdx b/apps/next/src/content/docs/llamaindex/modules/query_engines/metadata_filtering.mdx index 480edbf1ecaf0130e8f46c1013f5a2e8320ca9ac..937a4234b8245f79bd3174de6a3b4b13ee301495 100644 --- a/apps/next/src/content/docs/llamaindex/modules/query_engines/metadata_filtering.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/query_engines/metadata_filtering.mdx @@ -10,19 +10,27 @@ You can also check our multi-tenancy blog post to see how metadata filtering can Firstly, if you haven't already, you need to install the necessary packages: -```bash -pnpm i llamaindex -``` +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> + ```shell tab="npm" + npm install llamaindex @llamaindex/openai @llamaindex/chroma + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai @llamaindex/chroma + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai @llamaindex/chroma + ``` +</Tabs> Then you can import the necessary modules: ```ts -import { - ChromaVectorStore, - Document, - VectorStoreIndex, - storageContextFromDefaults, -} from "llamaindex"; +import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex"; +import { ChromaVectorStore } from "@llamaindex/chroma"; const collectionName = "dog_colors"; ``` @@ -95,12 +103,8 @@ Besides using the equal operator (`==`), you can also use a whole set of differe ## Full Code ```ts -import { - ChromaVectorStore, - Document, - VectorStoreIndex, - storageContextFromDefaults, -} from "llamaindex"; +import { Document, VectorStoreIndex, storageContextFromDefaults } from "llamaindex"; +import { ChromaVectorStore } from "@llamaindex/chroma"; const collectionName = "dog_colors"; diff --git a/apps/next/src/content/docs/llamaindex/modules/query_engines/router_query_engine.mdx b/apps/next/src/content/docs/llamaindex/modules/query_engines/router_query_engine.mdx index 0cd4dc0001561045af866c74acbddbac716e59cd..5322ecbf2b2d7c37d12f9135a5398bed4e720cad 100644 --- a/apps/next/src/content/docs/llamaindex/modules/query_engines/router_query_engine.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/query_engines/router_query_engine.mdx @@ -8,13 +8,24 @@ In this tutorial, we define a custom router query engine that selects one out of First, we need to install the necessary packages: -```bash -pnpm i lamaindex -``` +import { Tab, Tabs } from "fumadocs-ui/components/tabs"; + +<Tabs groupId="install" items={["npm", "yarn", "pnpm"]} persist> +
```shell tab="npm" + npm install llamaindex @llamaindex/openai @llamaindex/readers + ``` + + ```shell tab="yarn" + yarn add llamaindex @llamaindex/openai @llamaindex/readers + ``` + + ```shell tab="pnpm" + pnpm add llamaindex @llamaindex/openai @llamaindex/readers + ``` +</Tabs> ```ts import { - OpenAI, RouterQueryEngine, SimpleDirectoryReader, SentenceSplitter, @@ -22,6 +33,8 @@ import { VectorStoreIndex, Settings, } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; ``` ## Loading Data @@ -103,7 +116,6 @@ console.log({ ```ts import { - OpenAI, RouterQueryEngine, SimpleDirectoryReader, SentenceSplitter, @@ -111,6 +123,8 @@ import { VectorStoreIndex, Settings, } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; Settings.llm = new OpenAI(); Settings.nodeParser = new SentenceSplitter({ diff --git a/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx b/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx index 88e3509307116f91fbe342d99897a6dc0dfab115..bda0d53bfdb70fb5f2886256a66c0f5551f93b2b 100644 --- a/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/response_synthesizer.mdx @@ -18,7 +18,7 @@ The ResponseSynthesizer is responsible for sending the query, nodes, and prompt chunk. ```typescript -import { NodeWithScore, ResponseSynthesizer, TextNode } from "llamaindex"; +import { NodeWithScore, TextNode, ResponseSynthesizer } from "llamaindex"; const responseSynthesizer = new ResponseSynthesizer(); diff --git a/apps/next/src/content/docs/llamaindex/modules/workflows.mdx b/apps/next/src/content/docs/llamaindex/modules/workflows.mdx index 63f176555714c9ded2311fc3509b8997f094fddf..c1938b2e4466a142793b2e98691746baac7d3365 100644 --- a/apps/next/src/content/docs/llamaindex/modules/workflows.mdx +++ b/apps/next/src/content/docs/llamaindex/modules/workflows.mdx @@ -116,7 +116,7 @@ const jokeFlow = new Workflow({ verbose: true, validate: true }); Optionally, you can choose to use global context between steps. For example, maybe multiple steps access the original `query` input from the user. You can store this in global context so that every step has access.
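A sketch of what that can look like in a step (the event wiring and names are illustrative, and the `Context` import now comes from `llamaindex`, as the hunk below shows):

```typescript
import { Context, StartEvent, StopEvent, Workflow } from "llamaindex";

const flow = new Workflow();

flow.addStep(StartEvent, async (context: Context, ev: StartEvent) => {
  // Stash the original query once; any later step can read it back
  context.set("query", ev.data.input);
  return new StopEvent({ result: `echo: ${context.get("query")}` });
});

const result = await flow.run("What is LlamaIndex?");
console.log(result.data.result);
```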
```typescript -import { Context } from "@llamaindex/core/workflow"; +import { Context } from "llamaindex"; const query = async (context: Context, ev: MyEvent) => { // get the query from the context diff --git a/e2e/examples/llama-parse-browser/src/main.ts b/e2e/examples/llama-parse-browser/src/main.ts index 4362227c9f7820834fe70c5da365ff969250d84e..acf58b1be6bfcff2611d69ee274fafc516715d22 100644 --- a/e2e/examples/llama-parse-browser/src/main.ts +++ b/e2e/examples/llama-parse-browser/src/main.ts @@ -1,4 +1,4 @@ -import { LlamaParseReader } from "@llamaindex/cloud/reader"; +import { LlamaParseReader } from "@llamaindex/cloud"; import "./style.css"; new LlamaParseReader(); diff --git a/examples/Settings.ts b/examples/Settings.ts index 47778d75744e9cc1aa87481c4b849a934385d842..bf1de43178de09bfb0742162c6a5d902d546af11 100644 --- a/examples/Settings.ts +++ b/examples/Settings.ts @@ -1,6 +1,7 @@ import fs from "node:fs/promises"; -import { Document, OpenAI, Settings, VectorStoreIndex } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; Settings.llm = new OpenAI({ model: "gpt-4" }); diff --git a/examples/agent/large_toolcall.ts b/examples/agent/large_toolcall.ts index e558e2dc58352a8d0326c84dc013cad505918119..3cb677465f75f4f15199bf5261fedff5f242e690 100644 --- a/examples/agent/large_toolcall.ts +++ b/examples/agent/large_toolcall.ts @@ -1,4 +1,5 @@ -import { FunctionTool, OpenAI, OpenAIAgent } from "llamaindex"; +import { OpenAI, OpenAIAgent } from "@llamaindex/openai"; +import { FunctionTool } from "llamaindex"; const csvData = "TITLE,RELEASE_YEAR,SCORE,NUMBER_OF_VOTES,DURATION,MAIN_GENRE,MAIN_PRODUCTION\nDavid Attenborough: A Life on Our Planet,2020,9,31180,83,documentary,GB\nInception,2010,8.8,2268288,148,scifi,GB\nForrest Gump,1994,8.8,1994599,142,drama,US\nAnbe Sivam,2003,8.7,20595,160,comedy,IN\nBo Burnham: Inside,2021,8.7,44074,87,comedy,US\nSaving Private Ryan,1998,8.6,1346020,169,drama,US\nDjango Unchained,2012,8.4,1472668,165,western,US\nDangal,2016,8.4,180247,161,action,IN\nBo Burnham: Make Happy,2016,8.4,14356,60,comedy,US\nLouis C.K.: Hilarious,2010,8.4,11973,84,comedy,US\nDave Chappelle: Sticks & Stones,2019,8.4,25687,65,comedy,US\n3 Idiots,2009,8.4,385782,170,comedy,IN\nBlack Friday,2004,8.4,20611,143,crime,IN\nSuper Deluxe,2019,8.4,13680,176,thriller,IN\nWinter on Fire: Ukraine's Fight for Freedom,2015,8.3,17710,98,documentary,UA\nOnce Upon a Time in America,1984,8.3,342335,229,drama,US\nTaxi Driver,1976,8.3,795222,113,crime,US\nLike Stars on Earth,2007,8.3,188234,165,drama,IN\nBo Burnham: What.,2013,8.3,11488,60,comedy,US\nFull Metal Jacket,1987,8.3,723306,116,drama,GB\nWarrior,2011,8.2,463276,140,drama,US\nDrishyam,2015,8.2,79075,163,thriller,IN\nQueen,2014,8.2,64805,146,drama,IN\nPaan Singh Tomar,2012,8.2,35888,135,drama,IN"; diff --git a/examples/agent/large_toolcall_with_gpt4o.ts b/examples/agent/large_toolcall_with_gpt4o.ts index 522adf1e2863580f5995a68ff16c4dc089a73cac..bce2d12f9c5f1909d0eb4d986a2d745d8a7b8b3f 100644 --- a/examples/agent/large_toolcall_with_gpt4o.ts +++ b/examples/agent/large_toolcall_with_gpt4o.ts @@ -1,4 +1,5 @@ -import { FunctionTool, OpenAI, ToolCallOptions } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { FunctionTool, ToolCallOptions } from "llamaindex"; (async () => { // The tool call will generate a partial JSON for `gpt-4-turbo` diff --git a/examples/agent/multi_document_agent.ts b/examples/agent/multi_document_agent.ts index 
4cdd66a750dbe8d9a25734b23eee0aefd880c41c..ae69772bd0233640b3cbcce26cd4543cbcdedcc2 100644 --- a/examples/agent/multi_document_agent.ts +++ b/examples/agent/multi_document_agent.ts @@ -1,10 +1,9 @@ import fs from "node:fs/promises"; +import { OpenAI, OpenAIAgent } from "@llamaindex/openai"; import { Document, ObjectIndex, - OpenAI, - OpenAIAgent, QueryEngineTool, SentenceSplitter, Settings, diff --git a/examples/agent/openai-task.ts b/examples/agent/openai-task.ts index 22a602d9397e68dd14311a84262c4562f7ed321b..840b4fe645b710398d23cdfd27696e1c0eca2e51 100644 --- a/examples/agent/openai-task.ts +++ b/examples/agent/openai-task.ts @@ -1,4 +1,5 @@ -import { ChatResponseChunk, OpenAIAgent } from "llamaindex"; +import { OpenAIAgent } from "@llamaindex/openai"; +import { ChatResponseChunk } from "llamaindex"; import { getCurrentIDTool, getUserInfoTool, diff --git a/examples/agent/openai.ts b/examples/agent/openai.ts index a0780b7c7e836cc78efbd9a0b967a8e38bb4ab1c..cd44ac5ee2976e54322f50bbef40e279caf813ce 100644 --- a/examples/agent/openai.ts +++ b/examples/agent/openai.ts @@ -1,4 +1,5 @@ -import { FunctionTool, OpenAIAgent } from "llamaindex"; +import { OpenAIAgent } from "@llamaindex/openai"; +import { FunctionTool } from "llamaindex"; const sumNumbers = FunctionTool.from( ({ a, b }: { a: number; b: number }) => `${a + b}`, diff --git a/examples/agent/query_openai_agent.ts b/examples/agent/query_openai_agent.ts index dc8b63b9f0610b4f518d22610f7086d03e421d15..7ad7d0760a0ed3e8b928dafd97f23388c9f5ff00 100644 --- a/examples/agent/query_openai_agent.ts +++ b/examples/agent/query_openai_agent.ts @@ -1,5 +1,5 @@ +import { OpenAIAgent } from "@llamaindex/openai"; import { - OpenAIAgent, QueryEngineTool, SimpleDirectoryReader, VectorStoreIndex, diff --git a/examples/agent/react_agent.ts b/examples/agent/react_agent.ts index bb094237f39631813761a2de9d9d25633ac40289..504a4615ecb24f32140a7927fdc125a366475bb9 100644 --- a/examples/agent/react_agent.ts +++ b/examples/agent/react_agent.ts @@ -1,4 +1,5 @@ -import { Anthropic, FunctionTool, ReActAgent } from "llamaindex"; +import { Anthropic } from "@llamaindex/anthropic"; +import { FunctionTool, ReActAgent } from "llamaindex"; // Define a function to sum two numbers function sumNumbers({ a, b }: { a: number; b: number }) { diff --git a/examples/agent/retriever_openai_agent.ts b/examples/agent/retriever_openai_agent.ts index e9e5d84aa693ec2acd732e9d6b42f788ccba6bc2..a3b19628517cbb7ff44bfbcb1c98a18d670a82d6 100644 --- a/examples/agent/retriever_openai_agent.ts +++ b/examples/agent/retriever_openai_agent.ts @@ -1,8 +1,8 @@ +import { OpenAIAgent } from "@llamaindex/openai"; import { FunctionTool, MetadataMode, NodeWithScore, - OpenAIAgent, SimpleDirectoryReader, VectorStoreIndex, } from "llamaindex"; diff --git a/examples/agent/step_wise_query_tool.ts b/examples/agent/step_wise_query_tool.ts index 829d35f26e55aa7c09acb66b87b55cb26e999dcb..c3d0f575d3c35c810b15c9e5351e8c1a55a72eb9 100644 --- a/examples/agent/step_wise_query_tool.ts +++ b/examples/agent/step_wise_query_tool.ts @@ -1,5 +1,5 @@ +import { OpenAIAgent } from "@llamaindex/openai"; import { - OpenAIAgent, QueryEngineTool, SimpleDirectoryReader, VectorStoreIndex, diff --git a/examples/agent/step_wise_react.ts b/examples/agent/step_wise_react.ts index ae1cdf75146e4207e5640862b5dda5bd9a40ad38..e6d751b475dc9797ce14c6fa9e4cff97bcbe3aee 100644 --- a/examples/agent/step_wise_react.ts +++ b/examples/agent/step_wise_react.ts @@ -1,4 +1,5 @@ -import { Anthropic, FunctionTool, ReActAgent } from "llamaindex"; +import { 
Anthropic } from "@llamaindex/anthropic"; +import { FunctionTool, ReActAgent } from "llamaindex"; // Define a function to sum two numbers function sumNumbers({ a, b }: { a: number; b: number }) { diff --git a/examples/agent/stream_openai_agent.ts b/examples/agent/stream_openai_agent.ts index 4d8d6e8fcbd4cf3bc52f6923b630e2b17cdece09..7deea96ad96adcd593cc3db3212485f628409e89 100644 --- a/examples/agent/stream_openai_agent.ts +++ b/examples/agent/stream_openai_agent.ts @@ -1,4 +1,5 @@ -import { FunctionTool, OpenAIAgent } from "llamaindex"; +import { OpenAIAgent } from "@llamaindex/openai"; +import { FunctionTool } from "llamaindex"; // Define a function to sum two numbers function sumNumbers({ a, b }: { a: number; b: number }) { diff --git a/examples/agent/wiki.ts b/examples/agent/wiki.ts index 64d556fe1a78385cc233fc317e95ef80cfa958f9..d09e8149eabe4465f483ea8c4b3b7a657e0280a3 100644 --- a/examples/agent/wiki.ts +++ b/examples/agent/wiki.ts @@ -1,4 +1,4 @@ -import { OpenAI, OpenAIAgent } from "llamaindex"; +import { OpenAI, OpenAIAgent } from "@llamaindex/openai"; import { WikipediaTool } from "../wiki"; async function main() { diff --git a/examples/anthropic/agent.ts b/examples/anthropic/agent.ts index 8892dc40ef852f76b433ce05fd0f2928b2bc64de..5d146e104dc8e255f1cf74a3dc66f475229e6d2e 100644 --- a/examples/anthropic/agent.ts +++ b/examples/anthropic/agent.ts @@ -1,5 +1,5 @@ -import { Anthropic, FunctionTool, Settings } from "llamaindex"; -import { AnthropicAgent } from "llamaindex/agent/anthropic"; +import { Anthropic, AnthropicAgent } from "@llamaindex/anthropic"; +import { FunctionTool, Settings } from "llamaindex"; import { WikipediaTool } from "../wiki"; Settings.callbackManager.on("llm-tool-call", (event) => { diff --git a/examples/anthropic/chat.ts b/examples/anthropic/chat.ts index 48117f89192ade69ed13e8120575868d7d4a08c1..5663d91ea95a73c24ab658efa918c096ed25bfe8 100644 --- a/examples/anthropic/chat.ts +++ b/examples/anthropic/chat.ts @@ -1,4 +1,4 @@ -import { Anthropic } from "llamaindex"; +import { Anthropic } from "@llamaindex/anthropic"; (async () => { const anthropic = new Anthropic({ diff --git a/examples/anthropic/chat_interactive.ts b/examples/anthropic/chat_interactive.ts index 3f33b268fb7f46ea971273ffa6ba9e392461809b..d4ee02fb704c2a2ab56f5eec75c3452636e37f87 100644 --- a/examples/anthropic/chat_interactive.ts +++ b/examples/anthropic/chat_interactive.ts @@ -1,4 +1,5 @@ -import { Anthropic, ChatMemoryBuffer, SimpleChatEngine } from "llamaindex"; +import { Anthropic } from "@llamaindex/anthropic"; +import { ChatMemoryBuffer, SimpleChatEngine } from "llamaindex"; import { stdin as input, stdout as output } from "node:process"; import readline from "node:readline/promises"; diff --git a/examples/anthropic/haiku.ts b/examples/anthropic/haiku.ts index 1e43d23b261b8371191dfc212dd0d80ecf71af27..459e56f2509dcc131b36be1311f65e28cae4a2af 100644 --- a/examples/anthropic/haiku.ts +++ b/examples/anthropic/haiku.ts @@ -1,4 +1,4 @@ -import { Anthropic } from "llamaindex"; +import { Anthropic } from "@llamaindex/anthropic"; (async () => { const anthropic = new Anthropic({ diff --git a/examples/anthropic/prompt-caching.ts b/examples/anthropic/prompt-caching.ts index 2f9182e9a6e256abbfbc28a730711e12a6f4b710..e7d371c508f64cc9c9b501898acf8b3c1345a6e8 100644 --- a/examples/anthropic/prompt-caching.ts +++ b/examples/anthropic/prompt-caching.ts @@ -1,4 +1,4 @@ -import { Anthropic } from "llamaindex"; +import { Anthropic } from "@llamaindex/anthropic"; async function main() { const anthropic = new 
Anthropic({ diff --git a/examples/anthropic/stream.ts b/examples/anthropic/stream.ts index a91ed0422e67a8825eae7cf09e2c73041bd88d42..17b07bc55992b7f03e3bde189a5f242cb60fb08d 100644 --- a/examples/anthropic/stream.ts +++ b/examples/anthropic/stream.ts @@ -1,4 +1,4 @@ -import { Anthropic } from "llamaindex"; +import { Anthropic } from "@llamaindex/anthropic"; (async () => { const anthropic = new Anthropic({ diff --git a/examples/astradb/example.ts b/examples/astradb/example.ts index e576dcacc20368857b6bf789473b60c921041c8a..3b8028bb184a5a952a9746f1c4d6a6f06134ddfb 100644 --- a/examples/astradb/example.ts +++ b/examples/astradb/example.ts @@ -1,5 +1,5 @@ +import { AstraDBVectorStore } from "@llamaindex/astra"; import { - AstraDBVectorStore, Document, MetadataFilters, storageContextFromDefaults, diff --git a/examples/astradb/load.ts b/examples/astradb/load.ts index 12f544754f6216758c7f37af6d83fc7493d65739..b8e8cbfff95d10f90cf1bdcf10c5b362e4dcc744 100644 --- a/examples/astradb/load.ts +++ b/examples/astradb/load.ts @@ -1,9 +1,6 @@ -import { - AstraDBVectorStore, - CSVReader, - storageContextFromDefaults, - VectorStoreIndex, -} from "llamaindex"; +import { AstraDBVectorStore } from "@llamaindex/astra"; +import { CSVReader } from "@llamaindex/readers/csv"; +import { storageContextFromDefaults, VectorStoreIndex } from "llamaindex"; const collectionName = "movie_reviews"; diff --git a/examples/astradb/query.ts b/examples/astradb/query.ts index f0469cb1785054d348ecebf0d9e22f0fcb329a92..412e339a6575c2255448699ef29abca8bc2dcc2f 100644 --- a/examples/astradb/query.ts +++ b/examples/astradb/query.ts @@ -1,8 +1,5 @@ -import { - AstraDBVectorStore, - VectorStoreIndex, - serviceContextFromDefaults, -} from "llamaindex"; +import { AstraDBVectorStore } from "@llamaindex/astra"; +import { VectorStoreIndex, serviceContextFromDefaults } from "llamaindex"; const collectionName = "movie_reviews"; diff --git a/examples/azure-cosmosdb.ts b/examples/azure-cosmosdb.ts index fa9999dd654b48d3465d9b149302916a240f468d..0f56a79f246763e7fe0eaf79644ac5d04913092d 100644 --- a/examples/azure-cosmosdb.ts +++ b/examples/azure-cosmosdb.ts @@ -8,13 +8,15 @@ import { AzureCosmosDBNoSqlVectorStore, AzureCosmosNoSqlDocumentStore, AzureCosmosNoSqlIndexStore, +} from "@llamaindex/azure"; +import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai"; +import { Document, - OpenAI, - OpenAIEmbedding, Settings, storageContextFromDefaults, VectorStoreIndex, } from "llamaindex"; + /** * This example demonstrates how to use Azure CosmosDB with LlamaIndex. * It uses Azure CosmosDB as IndexStore, DocumentStore, and VectorStore. 
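Taken together, the example migrations above all follow one pattern: provider classes move to scoped packages, while core primitives stay in `llamaindex`. A minimal end-to-end sketch of that pattern (assuming `OPENAI_API_KEY` is set; the document text is a placeholder):

```ts
import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
import { Document, Settings, VectorStoreIndex } from "llamaindex";

// Providers from the scoped package, core types from llamaindex
Settings.llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
Settings.embedModel = new OpenAIEmbedding();

const index = await VectorStoreIndex.fromDocuments([
  new Document({ text: "Provider classes live in scoped packages; core stays in llamaindex." }),
]);

const response = await index.asQueryEngine().query({
  query: "Where do provider classes live?",
});
console.log(response.toString());
```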
diff --git a/examples/azure/azure-openai.ts b/examples/azure/azure-openai.ts index b463f686f0e02441b1ccba3ccb8601f44ca3665a..ce99eb6cb1f8e357a7546c75aeeacedce5dfb93b 100644 --- a/examples/azure/azure-openai.ts +++ b/examples/azure/azure-openai.ts @@ -2,8 +2,8 @@ import { DefaultAzureCredential, getBearerTokenProvider, } from "@azure/identity"; +import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai"; import "dotenv/config"; -import { OpenAI, OpenAIEmbedding } from "llamaindex"; const AZURE_COGNITIVE_SERVICES_SCOPE = "https://cognitiveservices.azure.com/.default"; diff --git a/examples/azure/azure_dynamic_session.ts b/examples/azure/azure_dynamic_session.ts index 31d5375c7e223d53c85d529ceb8095a50938779a..4dc84e024f0331615c4d06de264ebe4418deabac 100644 --- a/examples/azure/azure_dynamic_session.ts +++ b/examples/azure/azure_dynamic_session.ts @@ -4,7 +4,9 @@ import { DefaultAzureCredential, getBearerTokenProvider, } from "@azure/identity"; -import { AzureDynamicSessionTool, OpenAI, ReActAgent } from "llamaindex"; +import { AzureDynamicSessionTool } from "@llamaindex/azure"; +import { OpenAI } from "@llamaindex/openai"; +import { ReActAgent } from "llamaindex"; async function main() { const credential = new DefaultAzureCredential(); diff --git a/examples/chatHistory.ts b/examples/chatHistory.ts index c55c618d69ac5eb64ee20812ef11bb10e67bc68f..7d4b3caa3da9f7bec83b23bcb3bacc00562d7b4e 100644 --- a/examples/chatHistory.ts +++ b/examples/chatHistory.ts @@ -1,9 +1,9 @@ import { stdin as input, stdout as output } from "node:process"; import readline from "node:readline/promises"; +import { OpenAI } from "@llamaindex/openai"; import { ChatSummaryMemoryBuffer, - OpenAI, Settings, SimpleChatEngine, } from "llamaindex"; diff --git a/examples/chromadb/preFilters.ts b/examples/chromadb/preFilters.ts index 5b71878b13671fd9a6797aa41cebfa82d02e5c2d..764880a6bbcad76e2bce962a140c4f109c088a85 100644 --- a/examples/chromadb/preFilters.ts +++ b/examples/chromadb/preFilters.ts @@ -1,5 +1,5 @@ +import { ChromaVectorStore } from "@llamaindex/chroma"; import { - ChromaVectorStore, Document, MetadataFilters, VectorStoreIndex, diff --git a/examples/chromadb/test.ts b/examples/chromadb/test.ts index dbdcfa36730e8ddf137cc8ea0ca7a59d090007e9..c22466d176db5afcb86f79efe327da56ffc75975 100644 --- a/examples/chromadb/test.ts +++ b/examples/chromadb/test.ts @@ -1,9 +1,6 @@ -import { - ChromaVectorStore, - CSVReader, - storageContextFromDefaults, - VectorStoreIndex, -} from "llamaindex"; +import { ChromaVectorStore } from "@llamaindex/chroma"; +import { CSVReader } from "@llamaindex/readers/csv"; +import { storageContextFromDefaults, VectorStoreIndex } from "llamaindex"; const collectionName = "movie_reviews"; diff --git a/examples/cosmosdb/loadVectorData.ts b/examples/cosmosdb/loadVectorData.ts index cfb043b15b1adbf32d86832233d84eb74bd4210e..194f0c8b1769c6252e2e221b4f632dce66170ec6 100644 --- a/examples/cosmosdb/loadVectorData.ts +++ b/examples/cosmosdb/loadVectorData.ts @@ -1,14 +1,13 @@ import { CosmosClient } from "@azure/cosmos"; import { DefaultAzureCredential } from "@azure/identity"; +import { AzureCosmosDBNoSQLConfig } from "@llamaindex/azure"; +import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai"; import { SimpleCosmosDBReader, SimpleCosmosDBReaderLoaderConfig, } from "@llamaindex/readers/cosmosdb"; import * as dotenv from "dotenv"; import { - AzureCosmosDBNoSQLConfig, - OpenAI, - OpenAIEmbedding, Settings, storageContextFromDefaults, VectorStoreIndex, diff --git a/examples/cosmosdb/queryVectorData.ts 
b/examples/cosmosdb/queryVectorData.ts index 13e9604831369c567af7781d9ccc49001bc66f63..9d1cfbc7e1f09bdad772c81cc093efe6c03f1850 100644 --- a/examples/cosmosdb/queryVectorData.ts +++ b/examples/cosmosdb/queryVectorData.ts @@ -1,10 +1,9 @@ import { CosmosClient } from "@azure/cosmos"; import { DefaultAzureCredential } from "@azure/identity"; +import { AzureCosmosDBNoSQLConfig } from "@llamaindex/azure"; +import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai"; import * as dotenv from "dotenv"; import { - AzureCosmosDBNoSQLConfig, - OpenAI, - OpenAIEmbedding, Settings, storageContextFromDefaults, VectorStoreIndex, diff --git a/examples/cosmosdb/utils.ts b/examples/cosmosdb/utils.ts index 0a1328d019a0e9a14fd64e23f5530f85b6f110be..2b45d7698fe5e35a176688edd465308a817fa3d4 100644 --- a/examples/cosmosdb/utils.ts +++ b/examples/cosmosdb/utils.ts @@ -4,7 +4,7 @@ import { AzureCosmosDBNoSqlVectorStore, AzureCosmosNoSqlDocumentStore, AzureCosmosNoSqlIndexStore, -} from "llamaindex"; +} from "@llamaindex/azure"; /** * Util function to create AzureCosmosDB vectorStore, docStore, indexStore from connection string. diff --git a/examples/deepinfra/chat.ts b/examples/deepinfra/chat.ts index 88bd2b1c18d91494ef50145fb8256fcfd38a4976..c3c31bb16a6af999353104d076ad3444a818b560 100644 --- a/examples/deepinfra/chat.ts +++ b/examples/deepinfra/chat.ts @@ -1,4 +1,4 @@ -import { DeepInfra } from "llamaindex"; +import { DeepInfra } from "@llamaindex/deepinfra"; (async () => { if (!process.env.DEEPINFRA_API_TOKEN) { diff --git a/examples/deepinfra/embedding.ts b/examples/deepinfra/embedding.ts index adf494209099b960054c7ab25a2f0a40fe2615c8..1d0dd224afb647bf99907ff234678d98552e8d0d 100644 --- a/examples/deepinfra/embedding.ts +++ b/examples/deepinfra/embedding.ts @@ -1,4 +1,4 @@ -import { DeepInfraEmbedding } from "llamaindex"; +import { DeepInfraEmbedding } from "@llamaindex/deepinfra"; async function main() { // API token can be provided as an environment variable too diff --git a/examples/evaluation/correctness.ts b/examples/evaluation/correctness.ts index f9f9b7cbbc932da4e9e28b8d140175d478599774..af8970830d4d5a798a71add6436f3b7bf8cdb8f9 100644 --- a/examples/evaluation/correctness.ts +++ b/examples/evaluation/correctness.ts @@ -1,4 +1,5 @@ -import { CorrectnessEvaluator, OpenAI, Settings } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { CorrectnessEvaluator, Settings } from "llamaindex"; // Update llm to use OpenAI Settings.llm = new OpenAI({ model: "gpt-4" }); diff --git a/examples/evaluation/faithfulness.ts b/examples/evaluation/faithfulness.ts index f5b48288f3a30e7bbc8763e6dfc6a74b9c3728ff..d47e0cb3eb296a19c5da9e482541fada8a1dab2e 100644 --- a/examples/evaluation/faithfulness.ts +++ b/examples/evaluation/faithfulness.ts @@ -1,7 +1,7 @@ +import { OpenAI } from "@llamaindex/openai"; import { Document, FaithfulnessEvaluator, - OpenAI, Settings, VectorStoreIndex, } from "llamaindex"; diff --git a/examples/evaluation/relevancy.ts b/examples/evaluation/relevancy.ts index 91360783cdcc30a78a0ddae81b897d33c2002604..37b4fd1bce9d9c20b23d08295b9946957517736c 100644 --- a/examples/evaluation/relevancy.ts +++ b/examples/evaluation/relevancy.ts @@ -1,6 +1,6 @@ +import { OpenAI } from "@llamaindex/openai"; import { Document, - OpenAI, RelevancyEvaluator, Settings, VectorStoreIndex, diff --git a/examples/extractors/keywordExtractor.ts b/examples/extractors/keywordExtractor.ts index 78095cefb5db8f2bbec1c2f28f73bc773090af78..0a5a1433edfb5b9893e2a42aa6bf9abf03763d7b 100644 --- 
a/examples/extractors/keywordExtractor.ts +++ b/examples/extractors/keywordExtractor.ts @@ -1,9 +1,5 @@ -import { - Document, - KeywordExtractor, - OpenAI, - SentenceSplitter, -} from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { Document, KeywordExtractor, SentenceSplitter } from "llamaindex"; (async () => { const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 }); diff --git a/examples/extractors/questionsAnsweredExtractor.ts b/examples/extractors/questionsAnsweredExtractor.ts index d9a4f8a1b95ab12091e71309e2d8196f432852c0..ee518b4c47250fe5141c8e86358ac4c47e17def7 100644 --- a/examples/extractors/questionsAnsweredExtractor.ts +++ b/examples/extractors/questionsAnsweredExtractor.ts @@ -1,6 +1,6 @@ +import { OpenAI } from "@llamaindex/openai"; import { Document, - OpenAI, QuestionsAnsweredExtractor, SentenceSplitter, } from "llamaindex"; diff --git a/examples/extractors/summaryExtractor.ts b/examples/extractors/summaryExtractor.ts index f7e38c1f4e655c2616a3ce7594c135b878a4927e..5cdb6e5053cb7f73454776c8b81734d4cefc8f22 100644 --- a/examples/extractors/summaryExtractor.ts +++ b/examples/extractors/summaryExtractor.ts @@ -1,9 +1,5 @@ -import { - Document, - OpenAI, - SentenceSplitter, - SummaryExtractor, -} from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { Document, SentenceSplitter, SummaryExtractor } from "llamaindex"; (async () => { const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 }); diff --git a/examples/extractors/titleExtractor.ts b/examples/extractors/titleExtractor.ts index cdd22d9303a9611613f1b30f4dc2fdcfd512905c..b117ad1a1f491b5cadf68414602137a49ec51187 100644 --- a/examples/extractors/titleExtractor.ts +++ b/examples/extractors/titleExtractor.ts @@ -1,4 +1,5 @@ -import { Document, OpenAI, SentenceSplitter, TitleExtractor } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { Document, SentenceSplitter, TitleExtractor } from "llamaindex"; import essay from "../essay"; diff --git a/examples/gemini/agent.ts b/examples/gemini/agent.ts index 1099d89a19bca89a4428fc8c15977d2c9b1ec06c..fb4a292d7074da89015ac1e5a73882b5fed02ecc 100644 --- a/examples/gemini/agent.ts +++ b/examples/gemini/agent.ts @@ -1,10 +1,5 @@ -import { - FunctionTool, - Gemini, - GEMINI_MODEL, - LLMAgent, - Settings, -} from "llamaindex"; +import { Gemini, GEMINI_MODEL } from "@llamaindex/google"; +import { FunctionTool, LLMAgent, Settings } from "llamaindex"; Settings.callbackManager.on("llm-tool-call", (event) => { console.log(event.detail); diff --git a/examples/gemini/chat.ts b/examples/gemini/chat.ts index e4e304f3859152c99c5243e94a549832c8b94045..1b9a8d9a50340714d07465bb3608740b6c016864 100644 --- a/examples/gemini/chat.ts +++ b/examples/gemini/chat.ts @@ -1,4 +1,4 @@ -import { Gemini, GEMINI_MODEL } from "llamaindex"; +import { Gemini, GEMINI_MODEL } from "@llamaindex/google"; (async () => { if (!process.env.GOOGLE_API_KEY) { diff --git a/examples/gemini/chatVertex.ts b/examples/gemini/chatVertex.ts index 47936aaf2271ad4c09b08bec8c63e7c0d9198d8e..232ba8ef2f305b5b04e217587eb0d31e6556aa02 100644 --- a/examples/gemini/chatVertex.ts +++ b/examples/gemini/chatVertex.ts @@ -1,4 +1,4 @@ -import { Gemini, GEMINI_MODEL, GeminiVertexSession } from "llamaindex"; +import { Gemini, GEMINI_MODEL, GeminiVertexSession } from "@llamaindex/google"; (async () => { const gemini = new Gemini({ diff --git a/examples/gemini/embedding.ts b/examples/gemini/embedding.ts index 
6ecc2692ba67f4faa4b8c3dfbe88724b7a63d85e..fc90497671b472594d9b1c629d4e2ffc47d85bfc 100644 --- a/examples/gemini/embedding.ts +++ b/examples/gemini/embedding.ts @@ -1,4 +1,4 @@ -import { GEMINI_EMBEDDING_MODEL, GeminiEmbedding } from "llamaindex"; +import { GEMINI_EMBEDDING_MODEL, GeminiEmbedding } from "@llamaindex/google"; async function main() { if (!process.env.GOOGLE_API_KEY) { diff --git a/examples/gptllama.ts b/examples/gptllama.ts index 71ff0ecfa64ba2d97a3e9ed0062aea9b4878e8cf..66a517fcf76745783a93992de3d2cd3ced0c6442 100644 --- a/examples/gptllama.ts +++ b/examples/gptllama.ts @@ -1,7 +1,9 @@ import { stdin as input, stdout as output } from "node:process"; import readline from "node:readline/promises"; -import { ChatMessage, LlamaDeuce, OpenAI } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { LlamaDeuce } from "@llamaindex/replicate"; +import { ChatMessage } from "llamaindex"; (async () => { const gpt4 = new OpenAI({ model: "gpt-4", temperature: 0.9 }); diff --git a/examples/gptturbollama3.ts b/examples/gptturbollama3.ts index 95209cb7c2444941377f8c97427144b806257db6..afb44755f3e7b1ef6361cd43a17f4f6466cdc338 100644 --- a/examples/gptturbollama3.ts +++ b/examples/gptturbollama3.ts @@ -1,7 +1,9 @@ import { stdin as input, stdout as output } from "node:process"; import readline from "node:readline/promises"; -import { ChatMessage, OpenAI, ReplicateLLM } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { ReplicateLLM } from "@llamaindex/replicate"; +import { ChatMessage } from "llamaindex"; (async () => { const gpt4 = new OpenAI({ model: "gpt-4-turbo", temperature: 0.9 }); diff --git a/examples/groq.ts b/examples/groq.ts index 1c7b4b9af2831e37d4ae227a007860c53c86725e..284813c6c539fe84f621cb781be79542142e6e11 100644 --- a/examples/groq.ts +++ b/examples/groq.ts @@ -1,12 +1,8 @@ import fs from "node:fs/promises"; -import { - Document, - Groq, - HuggingFaceEmbedding, - Settings, - VectorStoreIndex, -} from "llamaindex"; +import { Groq } from "@llamaindex/groq"; +import { HuggingFaceEmbedding } from "@llamaindex/huggingface"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; // Update llm to use Groq Settings.llm = new Groq({ diff --git a/examples/huggingface/chat.ts b/examples/huggingface/chat.ts index e256f73b14d3401f342fe7ea8f7dcd714056ed41..fdea312fc8e53c3bc2f818b6410a1278f286f160 100644 --- a/examples/huggingface/chat.ts +++ b/examples/huggingface/chat.ts @@ -1,4 +1,4 @@ -import { HuggingFaceInferenceAPI } from "llamaindex"; +import { HuggingFaceInferenceAPI } from "@llamaindex/huggingface"; (async () => { if (!process.env.HUGGING_FACE_TOKEN) { diff --git a/examples/huggingface/embedding.ts b/examples/huggingface/embedding.ts index 8297b75366a47944bb56de0550e8df14b91694b0..4886c9ac2822f761a8d63a6f8c288200a634f9d5 100644 --- a/examples/huggingface/embedding.ts +++ b/examples/huggingface/embedding.ts @@ -1,12 +1,10 @@ import fs from "node:fs/promises"; import { - Document, HuggingFaceEmbedding, HuggingFaceEmbeddingModelType, - Settings, - VectorStoreIndex, -} from "llamaindex"; +} from "@llamaindex/huggingface"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; // Update embed model Settings.embedModel = new HuggingFaceEmbedding({ diff --git a/examples/huggingface/embeddingApi.ts b/examples/huggingface/embeddingApi.ts index a89df27036ad8d2c05092b5e3f65c531f03703f5..bc22765c9824645609a5c23bdd5c3649e6c2ae9c 100644 --- a/examples/huggingface/embeddingApi.ts +++ b/examples/huggingface/embeddingApi.ts @@ 
-1,12 +1,10 @@ import fs from "node:fs/promises"; import { - Document, HuggingFaceInferenceAPI, HuggingFaceInferenceAPIEmbedding, - Settings, - VectorStoreIndex, -} from "llamaindex"; +} from "@llamaindex/huggingface"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; if (!process.env.HUGGING_FACE_TOKEN) { throw new Error("Please set the HUGGING_FACE_TOKEN environment variable."); diff --git a/examples/huggingface/local.ts b/examples/huggingface/local.ts index b2d3cd0d1ca48004f24fdaab28a96921052d38d6..878239e879d989c9127dc80f0d20cf263772e7d0 100644 --- a/examples/huggingface/local.ts +++ b/examples/huggingface/local.ts @@ -1,4 +1,4 @@ -import { HuggingFaceLLM } from "llamaindex"; +import { HuggingFaceLLM } from "@llamaindex/huggingface"; (async () => { const hf = new HuggingFaceLLM(); diff --git a/examples/ingestion/basicIngestion.ts b/examples/ingestion/basicIngestion.ts index 9af5711a1b69dd17faee46ef7bcd16d309330fde..f3ae7269c0ac08be846b70e03c5ed915bac0bff6 100644 --- a/examples/ingestion/basicIngestion.ts +++ b/examples/ingestion/basicIngestion.ts @@ -1,7 +1,7 @@ +import { OpenAIEmbedding } from "@llamaindex/openai"; import { Document, IngestionPipeline, - OpenAIEmbedding, SentenceSplitter, VectorStoreIndex, } from "llamaindex"; diff --git a/examples/jsonExtract.ts b/examples/jsonExtract.ts index 68af23c1dfaed3b38274712382ffd19b50297f29..4622177e7d19b9d7a06d1f8c4977c34b427a237f 100644 --- a/examples/jsonExtract.ts +++ b/examples/jsonExtract.ts @@ -1,4 +1,4 @@ -import { OpenAI } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; // Example using OpenAI's chat API to extract JSON from a sales call transcript // using json_mode see https://platform.openai.com/docs/guides/text-generation/json-mode for more details diff --git a/examples/llama3.ts b/examples/llama3.ts index f5da9f23bb4152478e2fd783cb9cc15447152be4..49d981be31e2669d238a8a801640ca1b07555000 100644 --- a/examples/llama3.ts +++ b/examples/llama3.ts @@ -1,4 +1,4 @@ -import { ReplicateLLM } from "llamaindex"; +import { ReplicateLLM } from "@llamaindex/replicate"; (async () => { const tres = new ReplicateLLM({ model: "llama-3-70b-instruct" }); diff --git a/examples/llamadeuce.ts b/examples/llamadeuce.ts index 6f8243aa19613f739580753adc00f9da01091e91..ae8309ca7d88005aa93c853cf674b83d081c94d6 100644 --- a/examples/llamadeuce.ts +++ b/examples/llamadeuce.ts @@ -1,4 +1,4 @@ -import { DeuceChatStrategy, LlamaDeuce } from "llamaindex"; +import { DeuceChatStrategy, LlamaDeuce } from "@llamaindex/replicate"; (async () => { const deuce = new LlamaDeuce({ chatStrategy: DeuceChatStrategy.META }); diff --git a/examples/markdown.ts b/examples/markdown.ts index b2767713e6b8eb1989f27a9799dd224b17b0b468..289828e19a90e3be5a5f065ac382213470da7c03 100644 --- a/examples/markdown.ts +++ b/examples/markdown.ts @@ -1,4 +1,5 @@ -import { MarkdownReader, VectorStoreIndex } from "llamaindex"; +import { MarkdownReader } from "@llamaindex/readers/markdown"; +import { VectorStoreIndex } from "llamaindex"; async function main() { // Load Markdown file diff --git a/examples/metadata-filter/milvus.ts b/examples/metadata-filter/milvus.ts index 9415bca57fb01c54f14222a44e843ec5c81aa108..e26b366294a02f5b26f444d85401f82064b5affe 100644 --- a/examples/metadata-filter/milvus.ts +++ b/examples/metadata-filter/milvus.ts @@ -1,4 +1,5 @@ -import { MilvusVectorStore, VectorStoreIndex } from "llamaindex"; +import { MilvusVectorStore } from "@llamaindex/milvus"; +import { VectorStoreIndex } from "llamaindex"; const collectionName = 
"movie_reviews"; diff --git a/examples/milvus/load.ts b/examples/milvus/load.ts index 25a5c3e64dac87d5cdbb4cd91d8188886095499a..aea34664c85053d4b6165920121a6d758d993a31 100644 --- a/examples/milvus/load.ts +++ b/examples/milvus/load.ts @@ -1,9 +1,6 @@ -import { - CSVReader, - MilvusVectorStore, - storageContextFromDefaults, - VectorStoreIndex, -} from "llamaindex"; +import { MilvusVectorStore } from "@llamaindex/milvus"; +import { CSVReader } from "@llamaindex/readers/csv"; +import { storageContextFromDefaults, VectorStoreIndex } from "llamaindex"; const collectionName = "movie_reviews"; diff --git a/examples/milvus/query.ts b/examples/milvus/query.ts index ff33fc69a70affe0ac654f4aa2d769a2b5fa9589..3ee8a82d9a63c9da4d374f3b7a5a95a2112e524e 100644 --- a/examples/milvus/query.ts +++ b/examples/milvus/query.ts @@ -1,4 +1,5 @@ -import { MilvusVectorStore, VectorStoreIndex } from "llamaindex"; +import { MilvusVectorStore } from "@llamaindex/milvus"; +import { VectorStoreIndex } from "llamaindex"; const collectionName = "movie_reviews"; diff --git a/examples/mistral.ts b/examples/mistral.ts index 67555c337f683e6b0e74998812363d579a9411e9..5d37a567c3562eb3ab8297e5b4f83e43fe0fa26a 100644 --- a/examples/mistral.ts +++ b/examples/mistral.ts @@ -1,11 +1,6 @@ +import { MistralAI, MistralAIEmbedding } from "@llamaindex/mistral"; import * as fs from "fs/promises"; -import { - Document, - MistralAI, - MistralAIEmbedding, - Settings, - VectorStoreIndex, -} from "llamaindex"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; // Update embed model Settings.embedModel = new MistralAIEmbedding(); diff --git a/examples/mongo.ts b/examples/mongo.ts index 24f245ea3a9a37c77c9faabf3bb4fa77b9caf755..4bb8aa59418688beaf76bf52953e341219fca7a1 100644 --- a/examples/mongo.ts +++ b/examples/mongo.ts @@ -1,4 +1,5 @@ -import { Document, SimpleMongoReader, VectorStoreIndex } from "llamaindex"; +import { SimpleMongoReader } from "@llamaindex/readers/mongo"; +import { Document, VectorStoreIndex } from "llamaindex"; import { MongoClient } from "mongodb"; import { stdin as input, stdout as output } from "node:process"; diff --git a/examples/mongodb/2_load_and_index.ts b/examples/mongodb/2_load_and_index.ts index bc2380a29cdeefff3474b86f5b69fd95bae2cc4c..7eae088f91f21c7d72044d819b533f7a1b8a08b8 100644 --- a/examples/mongodb/2_load_and_index.ts +++ b/examples/mongodb/2_load_and_index.ts @@ -1,10 +1,7 @@ +import { MongoDBAtlasVectorSearch } from "@llamaindex/mongodb"; import { SimpleMongoReader } from "@llamaindex/readers/mongo"; import * as dotenv from "dotenv"; -import { - MongoDBAtlasVectorSearch, - storageContextFromDefaults, - VectorStoreIndex, -} from "llamaindex"; +import { storageContextFromDefaults, VectorStoreIndex } from "llamaindex"; import { MongoClient } from "mongodb"; // Load environment variables from local .env file diff --git a/examples/mongodb/3_query.ts b/examples/mongodb/3_query.ts index 2e097b977932a135d959ea48715de91534264ce0..088579ca234214410b8cf198e31929748a253507 100644 --- a/examples/mongodb/3_query.ts +++ b/examples/mongodb/3_query.ts @@ -1,5 +1,6 @@ +import { MongoDBAtlasVectorSearch } from "@llamaindex/mongodb"; import * as dotenv from "dotenv"; -import { MongoDBAtlasVectorSearch, VectorStoreIndex } from "llamaindex"; +import { VectorStoreIndex } from "llamaindex"; import { MongoClient } from "mongodb"; // Load environment variables from local .env file diff --git a/examples/multimodal/chat.ts b/examples/multimodal/chat.ts index 
4884a11b5297a339c1b58dea6dd56d1e55251b3d..4e0e4081cee531c399bd36a0546615af59b79ab8 100644 --- a/examples/multimodal/chat.ts +++ b/examples/multimodal/chat.ts @@ -1,5 +1,6 @@ // call pnpm tsx multimodal/load.ts first to init the storage -import { OpenAI, Settings, SimpleChatEngine, imageToDataUrl } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { Settings, SimpleChatEngine, imageToDataUrl } from "llamaindex"; import fs from "node:fs/promises"; import path from "path"; diff --git a/examples/multimodal/clip.ts b/examples/multimodal/clip.ts index f3eaa984d6c8a0119fe355a249558b9ade9b9c0b..6e06078667894556edc50f77a592bdbc0a46f443 100644 --- a/examples/multimodal/clip.ts +++ b/examples/multimodal/clip.ts @@ -1,4 +1,5 @@ -import { ClipEmbedding, similarity, SimilarityType } from "llamaindex"; +import { ClipEmbedding } from "@llamaindex/clip"; +import { similarity, SimilarityType } from "llamaindex"; async function main() { const clip = new ClipEmbedding(); diff --git a/examples/multimodal/context.ts b/examples/multimodal/context.ts index bfe0c7daa92ac4afe15c1ca056d81c25a6f699c0..f7c7e12fffc3589c026861e8f6696b6fb2475021 100644 --- a/examples/multimodal/context.ts +++ b/examples/multimodal/context.ts @@ -1,10 +1,10 @@ // call pnpm tsx multimodal/load.ts first to init the storage -import { extractText } from "@llamaindex/core/utils"; +import { OpenAI } from "@llamaindex/openai"; import { ContextChatEngine, + extractText, NodeWithScore, ObjectType, - OpenAI, Settings, VectorStoreIndex, } from "llamaindex"; diff --git a/examples/multimodal/rag.ts b/examples/multimodal/rag.ts index 14d3a1c7488e8365edfe5d746e81de2d920156d9..068121c366cbd495d61d7e780ba893d9eb13c247 100644 --- a/examples/multimodal/rag.ts +++ b/examples/multimodal/rag.ts @@ -1,7 +1,7 @@ -import { extractText } from "@llamaindex/core/utils"; +import { OpenAI } from "@llamaindex/openai"; import { + extractText, getResponseSynthesizer, - OpenAI, Settings, VectorStoreIndex, } from "llamaindex"; diff --git a/examples/ollama.ts b/examples/ollama.ts index 781c3cc085f0e784574d3441d25d288013297ccb..0cc6dd3f465b2a780dad720f7eb32f25457177a9 100644 --- a/examples/ollama.ts +++ b/examples/ollama.ts @@ -1,4 +1,4 @@ -import { OllamaEmbedding } from "llamaindex"; +import { OllamaEmbedding } from "@llamaindex/ollama"; import { Ollama } from "llamaindex/llm/ollama"; (async () => { diff --git a/examples/openai.ts b/examples/openai.ts index e1b36e940f12f4ed99d1f70611a736753820d932..20a383d8ee0da7d64aa5ee1bbea56408e9f165c0 100644 --- a/examples/openai.ts +++ b/examples/openai.ts @@ -1,4 +1,4 @@ -import { OpenAI, OpenAIEmbedding } from "llamaindex"; +import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai"; (async () => { const llm = new OpenAI({ model: "gpt-4-1106-preview", temperature: 0.1 }); diff --git a/examples/openai_o1.ts b/examples/openai_o1.ts index 5bf789dd216d08d64dded730968a1bde43b629bf..2ecc28a172b7b59574e6f155f338b55d2a3225d2 100644 --- a/examples/openai_o1.ts +++ b/examples/openai_o1.ts @@ -1,4 +1,4 @@ -import { OpenAI } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; (async () => { const llm = new OpenAI({ model: "o1-preview", temperature: 1 }); diff --git a/examples/package.json b/examples/package.json index 8b73a192092b254092e4e8d4317dcaecf561f985..1054d3c7022184858f75ac2f2e81433a13042d6d 100644 --- a/examples/package.json +++ b/examples/package.json @@ -4,30 +4,50 @@ "version": "0.1.0", "dependencies": { "@ai-sdk/openai": "^1.0.5", - "@aws-crypto/sha256-js": "^5.2.0", "@azure/cosmos": "^4.1.1", 
"@azure/identity": "^4.4.1", "@azure/search-documents": "^12.1.0", - "@datastax/astra-db-ts": "^1.4.1", - "@llamaindex/core": "^0.4.21", - "@llamaindex/readers": "^1.0.23", "@llamaindex/vercel": "^0.0.8", "@llamaindex/workflow": "^0.0.8", "@notionhq/client": "^2.2.15", "@pinecone-database/pinecone": "^4.0.0", "@vercel/postgres": "^0.10.0", - "@zilliz/milvus2-sdk-node": "^2.4.6", "ai": "^4.0.0", - "chromadb": "^1.8.1", "commander": "^12.1.0", "dotenv": "^16.4.5", "js-tiktoken": "^1.0.14", "llamaindex": "^0.8.33", "mongodb": "6.7.0", - "pathe": "^1.1.2", "postgres": "^3.4.4", "ajv": "^8.17.1", - "wikipedia": "^2.1.2" + "wikipedia": "^2.1.2", + "@llamaindex/openai": "workspace:*", + "@llamaindex/cloud": "workspace:*", + "@llamaindex/anthropic": "workspace:*", + "@llamaindex/clip": "workspace:*", + "@llamaindex/azure": "workspace:*", + "@llamaindex/deepinfra": "workspace:*", + "@llamaindex/groq": "workspace:*", + "@llamaindex/huggingface": "workspace:*", + "@llamaindex/node-parser": "workspace:*", + "@llamaindex/ollama": "workspace:*", + "@llamaindex/portkey-ai": "workspace:*", + "@llamaindex/readers": "workspace:*", + "@llamaindex/replicate": "workspace:*", + "@llamaindex/vllm": "workspace:*", + "@llamaindex/postgres": "workspace:*", + "@llamaindex/astra": "workspace:*", + "@llamaindex/milvus": "workspace:*", + "@llamaindex/chroma": "workspace:*", + "@llamaindex/mongodb": "workspace:*", + "@llamaindex/pinecone": "workspace:*", + "@llamaindex/qdrant": "workspace:*", + "@llamaindex/upstash": "workspace:*", + "@llamaindex/weaviate": "workspace:*", + "@llamaindex/google": "workspace:*", + "@llamaindex/mistral": "workspace:*", + "@llamaindex/mixedbread": "workspace:*", + "@llamaindex/cohere": "workspace:*" }, "devDependencies": { "@types/node": "^22.9.0", diff --git a/examples/pinecone-vector-store/load-docs.ts b/examples/pinecone-vector-store/load-docs.ts index 7de41d0bfa2986f9d53fa410c3f9fed12ceef0d6..1c6a1575876f05e4d520197072183d168569c2d4 100755 --- a/examples/pinecone-vector-store/load-docs.ts +++ b/examples/pinecone-vector-store/load-docs.ts @@ -1,7 +1,7 @@ // load-docs.ts +import { PineconeVectorStore } from "@llamaindex/pinecone"; import fs from "fs/promises"; import { - PineconeVectorStore, SimpleDirectoryReader, storageContextFromDefaults, VectorStoreIndex, diff --git a/examples/pinecone-vector-store/query.ts b/examples/pinecone-vector-store/query.ts index a1ad19524d5adff79071bccb94aef253481d2c91..34ccf5bb1669dc4af250ad39abc990ccee5118b0 100755 --- a/examples/pinecone-vector-store/query.ts +++ b/examples/pinecone-vector-store/query.ts @@ -1,4 +1,5 @@ -import { PineconeVectorStore, VectorStoreIndex } from "llamaindex"; +import { PineconeVectorStore } from "@llamaindex/pinecone"; +import { VectorStoreIndex } from "llamaindex"; async function main() { // eslint-disable-next-line @typescript-eslint/no-require-imports diff --git a/examples/portkey.ts b/examples/portkey.ts index 694c0258600df08405899e651146b8fe4cdb4e6d..a75ab2dd5df6c4928dc2db5d089b6ea3d58cd4b6 100644 --- a/examples/portkey.ts +++ b/examples/portkey.ts @@ -1,4 +1,4 @@ -import { Portkey } from "llamaindex"; +import { Portkey } from "@llamaindex/portkey-ai"; (async () => { const portkey = new Portkey({ diff --git a/examples/qdrantdb/preFilters.ts b/examples/qdrantdb/preFilters.ts index bfd59da2eac987403f29346b2d32697113ffc010..53f4f03a71a7c2380ecb29ec5cafa9828c86c957 100644 --- a/examples/qdrantdb/preFilters.ts +++ b/examples/qdrantdb/preFilters.ts @@ -1,9 +1,9 @@ +import { QdrantVectorStore } from "@llamaindex/qdrant"; import * 
 import {
   Document,
   MetadataMode,
   NodeWithScore,
-  QdrantVectorStore,
   Settings,
   VectorStoreIndex,
   storageContextFromDefaults,
diff --git a/examples/readers/package.json b/examples/readers/package.json
index 5a169393084da6fab8414a2649820bfbb5246c07..a8e4d0f208034a9eabfd059c87e43a8102635b52 100644
--- a/examples/readers/package.json
+++ b/examples/readers/package.json
@@ -20,7 +20,8 @@
   },
   "dependencies": {
     "@llamaindex/readers": "*",
-    "llamaindex": "*"
+    "llamaindex": "*",
+    "@llamaindex/cloud": "*"
   },
   "devDependencies": {
     "@types/node": "^22.9.0",
diff --git a/examples/readers/src/assemblyai.ts b/examples/readers/src/assemblyai.ts
index a15939142d6b59d2838ccdff6fb9a86bebe964b8..70daa317fea71c66fb51036147b5122aa6940b83 100644
--- a/examples/readers/src/assemblyai.ts
+++ b/examples/readers/src/assemblyai.ts
@@ -1,6 +1,9 @@
-import { AudioTranscriptReader } from "@llamaindex/readers/assembly-ai";
+import {
+  AudioTranscriptReader,
+  TranscribeParams,
+} from "@llamaindex/readers/assembly-ai";
 import { program } from "commander";
-import { TranscribeParams, VectorStoreIndex } from "llamaindex";
+import { VectorStoreIndex } from "llamaindex";
 import { stdin as input, stdout as output } from "node:process";
 import { createInterface } from "node:readline/promises";
 
diff --git a/examples/readers/src/csv.ts b/examples/readers/src/csv.ts
index ac69468e34fee9b2ca55c0ad20276526b337a87d..2ced907ee59430a8b9df8cfd02c98b86a5a68c40 100644
--- a/examples/readers/src/csv.ts
+++ b/examples/readers/src/csv.ts
@@ -1,7 +1,7 @@
+import { OpenAI } from "@llamaindex/openai";
 import { CSVReader } from "@llamaindex/readers/csv";
 import {
   getResponseSynthesizer,
-  OpenAI,
   PromptTemplate,
   Settings,
   VectorStoreIndex,
diff --git a/examples/readers/src/llamaparse-docx.ts b/examples/readers/src/llamaparse-docx.ts
index 6e6609706415290132934e93aa2a1214b95b263f..5d28d39ec163ad3b65edf1a1889404915188d0e6 100644
--- a/examples/readers/src/llamaparse-docx.ts
+++ b/examples/readers/src/llamaparse-docx.ts
@@ -1,4 +1,4 @@
-import { Language, LlamaParseReader } from "llamaindex";
+import { Language, LlamaParseReader } from "@llamaindex/cloud";
 import fs from "node:fs";
 import path from "node:path";
 
diff --git a/examples/readers/src/llamaparse-json.ts b/examples/readers/src/llamaparse-json.ts
index c5f2fc5806eedd4eadccb27deec4bb674eae9e74..05ad482cd4c93572a881ec08febef2787a705ffb 100644
--- a/examples/readers/src/llamaparse-json.ts
+++ b/examples/readers/src/llamaparse-json.ts
@@ -1,11 +1,11 @@
-import { createMessageContent } from "@llamaindex/core/response-synthesizers";
+import { LlamaParseReader } from "@llamaindex/cloud";
+import { OpenAI } from "@llamaindex/openai";
 import {
   Document,
   ImageNode,
-  LlamaParseReader,
-  OpenAI,
   PromptTemplate,
   VectorStoreIndex,
+  createMessageContent,
 } from "llamaindex";
 
 const reader = new LlamaParseReader();
diff --git a/examples/readers/src/llamaparse.ts b/examples/readers/src/llamaparse.ts
index 3a9e6b2875869cd67944a9f4111e2dea20b5b16b..f45c2d2210c5aaf1b8c9b66a4a4ea86fd5374a1f 100644
--- a/examples/readers/src/llamaparse.ts
+++ b/examples/readers/src/llamaparse.ts
@@ -1,4 +1,5 @@
-import { LlamaParseReader, VectorStoreIndex } from "llamaindex";
+import { LlamaParseReader } from "@llamaindex/cloud";
+import { VectorStoreIndex } from "llamaindex";
 
 async function main() {
   // Load PDF using LlamaParse
diff --git a/examples/readers/src/pdf_fw_openai.ts b/examples/readers/src/pdf_fw_openai.ts
index 1089f46af6e6a2392d124c0c32af532665b7bfeb..3e33d37d343d8a35c8fa3422e25ba7c7d7fdf124 100644
--- a/examples/readers/src/pdf_fw_openai.ts
+++ b/examples/readers/src/pdf_fw_openai.ts
@@ -1,5 +1,6 @@
+import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
 import { PDFReader } from "@llamaindex/readers/pdf";
-import { OpenAI, OpenAIEmbedding, VectorStoreIndex } from "llamaindex";
+import { VectorStoreIndex } from "llamaindex";
 import { Settings } from "llamaindex";
 
diff --git a/examples/readers/src/simple-directory-reader-with-llamaparse.ts b/examples/readers/src/simple-directory-reader-with-llamaparse.ts
index 5a1214a472bc7d166baa1409cc4f7ff29fd73427..e71683657799d5fb80ceb41f035ef15663b0c1ce 100644
--- a/examples/readers/src/simple-directory-reader-with-llamaparse.ts
+++ b/examples/readers/src/simple-directory-reader-with-llamaparse.ts
@@ -1,5 +1,5 @@
-import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
-import { LlamaParseReader, VectorStoreIndex } from "llamaindex";
+import { LlamaParseReader } from "@llamaindex/cloud";
+import { SimpleDirectoryReader, VectorStoreIndex } from "llamaindex";
 
 async function main() {
   const reader = new SimpleDirectoryReader();
diff --git a/examples/readers/src/simple-directory-reader.ts b/examples/readers/src/simple-directory-reader.ts
index 23df9c82be9afb2f986cb31f7fc57355312c9e1b..784b5b5efc8aad5d8ace96d1434d26a0b0e24881 100644
--- a/examples/readers/src/simple-directory-reader.ts
+++ b/examples/readers/src/simple-directory-reader.ts
@@ -1,4 +1,4 @@
-import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
+import { SimpleDirectoryReader } from "llamaindex";
 // or
-// import { SimpleDirectoryReader } from 'llamaindex'
+// import { SimpleDirectoryReader } from '@llamaindex/readers/directory'
 
diff --git a/examples/readonly.ts b/examples/readonly.ts
index e0a2cfb9ba3374c8494757b14503c3423e7cbb45..64fcdf09b5960c3b88d531f259877d92ad2c3b74 100644
--- a/examples/readonly.ts
+++ b/examples/readonly.ts
@@ -1,9 +1,6 @@
+import { PDFReader } from "@llamaindex/readers/pdf";
 import { execSync } from "child_process";
-import {
-  PDFReader,
-  VectorStoreIndex,
-  storageContextFromDefaults,
-} from "llamaindex";
+import { VectorStoreIndex, storageContextFromDefaults } from "llamaindex";
 
 const STORAGE_DIR = "./cache";
 
diff --git a/examples/recipes/cost-analysis.ts b/examples/recipes/cost-analysis.ts
index 09da9530808f41e110495492e4e0d267221d2830..2a5e3cb9e43fab255bf79f6389723e224f4d7709 100644
--- a/examples/recipes/cost-analysis.ts
+++ b/examples/recipes/cost-analysis.ts
@@ -1,6 +1,6 @@
-import { extractText } from "@llamaindex/core/utils";
+import { OpenAI } from "@llamaindex/openai";
 import { encodingForModel } from "js-tiktoken";
-import { ChatMessage, OpenAI } from "llamaindex";
+import { ChatMessage, extractText } from "llamaindex";
 import { Settings } from "llamaindex/Settings";
 
 const encoding = encodingForModel("gpt-4-0125-preview");
diff --git a/examples/rerankers/CohereReranker.ts b/examples/rerankers/CohereReranker.ts
index 6b4bda87bbbd3128ac9a7c0fd635f7c4a8b65a4c..7e1cc812eb2e1cabd436b64edc22375e3e25f8af 100644
--- a/examples/rerankers/CohereReranker.ts
+++ b/examples/rerankers/CohereReranker.ts
@@ -1,10 +1,6 @@
-import {
-  CohereRerank,
-  Document,
-  OpenAI,
-  Settings,
-  VectorStoreIndex,
-} from "llamaindex";
+import { CohereRerank } from "@llamaindex/cohere";
+import { OpenAI } from "@llamaindex/openai";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 import essay from "../essay";
 
diff --git a/examples/routerQueryEngine.ts b/examples/routerQueryEngine.ts
index b1081d5c3d9e7ebd540c26545f0e33ed7375bce2..51f5025bd15f14533e48e5c6fc68f709d9e1a147 100644
--- a/examples/routerQueryEngine.ts
+++ b/examples/routerQueryEngine.ts
@@ -1,5 +1,5 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
-  OpenAI,
   RouterQueryEngine,
   SentenceSplitter,
   Settings,
diff --git a/examples/sentenceWindow.ts b/examples/sentenceWindow.ts
index cd470db3457e06d441f61855c7d305e42dce1475..c8dfe7ce614e9a3ca0630ef6dc7c1905c3e4a895 100644
--- a/examples/sentenceWindow.ts
+++ b/examples/sentenceWindow.ts
@@ -1,6 +1,6 @@
+import { HuggingFaceEmbedding } from "@llamaindex/huggingface";
 import {
   Document,
-  HuggingFaceEmbedding,
   MetadataReplacementPostProcessor,
   SentenceWindowNodeParser,
   Settings,
diff --git a/examples/toolsStream.ts b/examples/toolsStream.ts
index 7108ddd06802a358319c2439bdc09452b482d0d2..55e6ca2502aaad053e4c253c23ad2ec4c710b6ce 100644
--- a/examples/toolsStream.ts
+++ b/examples/toolsStream.ts
@@ -1,4 +1,4 @@
-import { OpenAI } from "llamaindex";
+import { OpenAI } from "@llamaindex/openai";
 
 async function main() {
   const llm = new OpenAI({ model: "gpt-4-turbo" });
diff --git a/examples/vector-store/azure/index.ts b/examples/vector-store/azure/index.ts
index ddaeadcf08cfa59b0b2ac3663bec6d08523a1d6d..2a3f1e57b9f8631a81166af701213bd6a16315ca 100644
--- a/examples/vector-store/azure/index.ts
+++ b/examples/vector-store/azure/index.ts
@@ -7,19 +7,20 @@
   KnownAnalyzerNames,
   KnownVectorSearchAlgorithmKind,
 } from "@azure/search-documents";
-import dotenv from "dotenv";
 import {
   AzureAISearchVectorStore,
-  Document,
   FilterableMetadataFieldKeysType,
+  IndexManagement,
+  MetadataIndexFieldType,
+} from "@llamaindex/azure";
+import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
+import dotenv from "dotenv";
+import {
+  Document,
   FilterCondition,
   FilterOperator,
-  IndexManagement,
   Metadata,
-  MetadataIndexFieldType,
   NodeWithScore,
-  OpenAI,
-  OpenAIEmbedding,
   Settings,
   SimpleDirectoryReader,
   storageContextFromDefaults,
diff --git a/examples/vector-store/pg/load-docs.ts b/examples/vector-store/pg/load-docs.ts
index 1932a472ccb816ee0eadff7f4c585f32eae8d608..ac37877159a331fa197db3d39849d79ea9d6fcc1 100755
--- a/examples/vector-store/pg/load-docs.ts
+++ b/examples/vector-store/pg/load-docs.ts
@@ -1,10 +1,10 @@
 // load-docs.ts
+import { PGVectorStore } from "@llamaindex/postgres";
 import {
   SimpleDirectoryReader,
   storageContextFromDefaults,
   VectorStoreIndex,
 } from "llamaindex";
-import { PGVectorStore } from "llamaindex/vector-store/PGVectorStore";
 import fs from "node:fs/promises";
 
 async function getSourceFilenames(sourceDir: string) {
diff --git a/examples/vector-store/pg/supabase.ts b/examples/vector-store/pg/supabase.ts
index 1cbbb5354a40e9431147e3b71537420189e1d6be..db157b8625b9c948655ce658b649ce876dbed522 100644
--- a/examples/vector-store/pg/supabase.ts
+++ b/examples/vector-store/pg/supabase.ts
@@ -1,3 +1,4 @@
+import { PGVectorStore } from "@llamaindex/postgres";
 import dotenv from "dotenv";
 import {
   SimpleDirectoryReader,
@@ -5,8 +6,6 @@
   VectorStoreIndex,
 } from "llamaindex";
 
-import { PGVectorStore } from "llamaindex/vector-store/PGVectorStore";
-
 dotenv.config();
 
 // Get direct connection string from Supabase and set it as POSTGRES_URL environment variable
diff --git a/examples/vectorIndexAnthropic.ts b/examples/vectorIndexAnthropic.ts
index 6ebef441fc67579a48df89e78142a176926039b6..2a3e03d0ad363af350e13b110657c20617a85b0b 100644
--- a/examples/vectorIndexAnthropic.ts
+++ b/examples/vectorIndexAnthropic.ts
@@ -1,7 +1,7 @@
 import fs from "node:fs/promises";
"node:fs/promises"; +import { Anthropic } from "@llamaindex/anthropic"; import { - Anthropic, Document, Settings, VectorStoreIndex, diff --git a/examples/vectorIndexCustomize.ts b/examples/vectorIndexCustomize.ts index 437a5415bc098a38a00adb5e9dc327dea3e4f2e3..337b6e634eb91a08c132723890479a0071ab95dd 100644 --- a/examples/vectorIndexCustomize.ts +++ b/examples/vectorIndexCustomize.ts @@ -1,6 +1,6 @@ +import { OpenAI } from "@llamaindex/openai"; import { Document, - OpenAI, RetrieverQueryEngine, Settings, SimilarityPostprocessor, diff --git a/examples/vectorIndexEmbed3.ts b/examples/vectorIndexEmbed3.ts index da0507682e002f9955535d3fd0b658d19990e24d..57c0371e27d749f11aa8d3ec4aafeeafcd5f5cdc 100644 --- a/examples/vectorIndexEmbed3.ts +++ b/examples/vectorIndexEmbed3.ts @@ -1,11 +1,7 @@ import fs from "node:fs/promises"; -import { - Document, - OpenAIEmbedding, - Settings, - VectorStoreIndex, -} from "llamaindex"; +import { OpenAIEmbedding } from "@llamaindex/openai"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; // Update embed model Settings.embedModel = new OpenAIEmbedding({ diff --git a/examples/vectorIndexFromVectorStore.ts b/examples/vectorIndexFromVectorStore.ts index d89a9552bfb44abdffcf1c0511dea6ff3b94d0ed..5526f73920a94275d534d076f804e422f2d61fa2 100644 --- a/examples/vectorIndexFromVectorStore.ts +++ b/examples/vectorIndexFromVectorStore.ts @@ -1,8 +1,8 @@ +import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai"; +import { Index, Pinecone, RecordMetadata } from "@pinecone-database/pinecone"; import { BaseVectorStore, getResponseSynthesizer, - OpenAI, - OpenAIEmbedding, RetrieverQueryEngine, Settings, TextNode, @@ -12,8 +12,6 @@ import { VectorStoreQueryResult, } from "llamaindex"; -import { Index, Pinecone, RecordMetadata } from "@pinecone-database/pinecone"; - // Update llm Settings.llm = new OpenAI({ model: "gpt-4", diff --git a/examples/vectorIndexGPT4.ts b/examples/vectorIndexGPT4.ts index dab35809e9908e4135362c4bae92c60de39a1146..6134da7364db8656c29a29b34bc7d4f47c8d8970 100644 --- a/examples/vectorIndexGPT4.ts +++ b/examples/vectorIndexGPT4.ts @@ -1,6 +1,7 @@ import fs from "node:fs/promises"; -import { Document, OpenAI, Settings, VectorStoreIndex } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; Settings.llm = new OpenAI({ model: "gpt-4" }); diff --git a/examples/vectorIndexLocal.ts b/examples/vectorIndexLocal.ts index 9d19e68ced68f43cb642442ba6ff5e09e8f5ddef..f2b67b0f1b4fcd19038d180853e189f31a3018ea 100644 --- a/examples/vectorIndexLocal.ts +++ b/examples/vectorIndexLocal.ts @@ -1,12 +1,8 @@ import fs from "node:fs/promises"; -import { - Document, - HuggingFaceEmbedding, - Ollama, - Settings, - VectorStoreIndex, -} from "llamaindex"; +import { HuggingFaceEmbedding } from "@llamaindex/huggingface"; +import { Ollama } from "@llamaindex/ollama"; +import { Document, Settings, VectorStoreIndex } from "llamaindex"; Settings.llm = new Ollama({ model: "mixtral:8x7b", diff --git a/examples/vision.ts b/examples/vision.ts index 6ee5d33d5b92a66eccc5a1e570efcd9317b9ab73..f4838b36664df0760b9e65f9594bf72b030a6fea 100644 --- a/examples/vision.ts +++ b/examples/vision.ts @@ -1,4 +1,4 @@ -import { OpenAI } from "llamaindex"; +import { OpenAI } from "@llamaindex/openai"; (async () => { const llm = new OpenAI({ model: "gpt-4-turbo", temperature: 0.1 }); diff --git a/examples/vllm.ts b/examples/vllm.ts index 
--- a/examples/vllm.ts
+++ b/examples/vllm.ts
@@ -1,4 +1,4 @@
-import { VLLM } from "llamaindex";
+import { VLLM } from "@llamaindex/vllm";
 
 const llm = new VLLM({
   model: "NousResearch/Meta-Llama-3-8B-Instruct",
diff --git a/examples/weaviate/load.ts b/examples/weaviate/load.ts
index a1bf38187b8347e1e6a5da1de717b68c5ab4b8d6..a1d0e003209e501d4bdb1db0fe27ac8c580d479d 100644
--- a/examples/weaviate/load.ts
+++ b/examples/weaviate/load.ts
@@ -1,9 +1,6 @@
-import {
-  CSVReader,
-  storageContextFromDefaults,
-  VectorStoreIndex,
-  WeaviateVectorStore,
-} from "llamaindex";
+import { CSVReader } from "@llamaindex/readers/csv";
+import { WeaviateVectorStore } from "@llamaindex/weaviate";
+import { storageContextFromDefaults, VectorStoreIndex } from "llamaindex";
 
 const indexName = "MovieReviews";
 
diff --git a/examples/weaviate/query.ts b/examples/weaviate/query.ts
index ad265dece299db555c92d92943433b3f7bcf3a94..6c83f74d6eb761613aa88158b1d3f21c753d427c 100644
--- a/examples/weaviate/query.ts
+++ b/examples/weaviate/query.ts
@@ -1,4 +1,5 @@
-import { VectorStoreIndex, WeaviateVectorStore } from "llamaindex";
+import { WeaviateVectorStore } from "@llamaindex/weaviate";
+import { VectorStoreIndex } from "llamaindex";
 
 const indexName = "MovieReviews";
 
diff --git a/examples/wiki.ts b/examples/wiki.ts
index 8ff6efda08a12558ae22afd3e162607f31bd9af8..4a829fcb525b3ea5626449a846e609b05c01734c 100644
--- a/examples/wiki.ts
+++ b/examples/wiki.ts
@@ -1,7 +1,7 @@
 /** Example of a tool that uses Wikipedia */
-import type { BaseTool, ToolMetadata } from "@llamaindex/core/llms";
 import type { JSONSchemaType } from "ajv";
+import type { BaseTool, ToolMetadata } from "llamaindex";
 import { default as wiki } from "wikipedia";
 
 type WikipediaParameter = {
diff --git a/examples/workflow/app-creator.ts b/examples/workflow/app-creator.ts
index 8a6448cb1c061855376ddf645855900dc3eb9e42..c1b05659eb24fbe3b9015a5f9a66fa34553da916 100644
--- a/examples/workflow/app-creator.ts
+++ b/examples/workflow/app-creator.ts
@@ -1,3 +1,4 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   HandlerContext,
   StartEvent,
@@ -5,7 +6,6 @@
   Workflow,
   WorkflowEvent,
 } from "@llamaindex/workflow";
-import { OpenAI } from "llamaindex";
 
 const MAX_REVIEWS = 3;
 
diff --git a/examples/workflow/conditional.ts b/examples/workflow/conditional.ts
index c8b6cf04013cedb55115dbe9b5d22ec2a31d6d9e..6a2e384069f7eb959e25c5f1875bae9964aa2ebf 100644
--- a/examples/workflow/conditional.ts
+++ b/examples/workflow/conditional.ts
@@ -1,3 +1,4 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   HandlerContext,
   StartEvent,
@@ -5,7 +6,6 @@
   Workflow,
   WorkflowEvent,
 } from "@llamaindex/workflow";
-import { OpenAI } from "llamaindex";
 
 // Create LLM instance
 const llm = new OpenAI();
diff --git a/examples/workflow/joke.ts b/examples/workflow/joke.ts
index 310761d77a4b5a33b632ac94342d18eb17872ecf..b13e4dd1813d2725675157534a5a85158908d5b3 100644
--- a/examples/workflow/joke.ts
+++ b/examples/workflow/joke.ts
@@ -1,10 +1,10 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   StartEvent,
   StopEvent,
   Workflow,
   WorkflowEvent,
 } from "@llamaindex/workflow";
-import { OpenAI } from "llamaindex";
 
 // Create LLM instance
 const llm = new OpenAI();
diff --git a/examples/workflow/stream-events.ts b/examples/workflow/stream-events.ts
index 2fc1f107df4316343f14e2932196f3d02917fa4e..8d337b3540ecc37b94a945ea369482a844d195f6 100644
--- a/examples/workflow/stream-events.ts
+++ b/examples/workflow/stream-events.ts
@@ -1,3 +1,4 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   HandlerContext,
   StartEvent,
@@ -5,7 +6,6 @@
   Workflow,
   WorkflowEvent,
 } from "@llamaindex/workflow";
-import { OpenAI } from "llamaindex";
 
 // Create LLM instance
 const llm = new OpenAI();
diff --git a/examples/workflow/validation.ts b/examples/workflow/validation.ts
index 7be348cfcd79bec1f793991308b4849ebfa60226..7bfacf8442e334e34b6b1790da48307b201f9bb0 100644
--- a/examples/workflow/validation.ts
+++ b/examples/workflow/validation.ts
@@ -1,10 +1,10 @@
+import { OpenAI } from "@llamaindex/openai";
 import {
   StartEvent,
   StopEvent,
   Workflow,
   WorkflowEvent,
 } from "@llamaindex/workflow";
-import { OpenAI } from "llamaindex";
 
 // Create LLM instance
 const llm = new OpenAI();
diff --git a/packages/cloud/package.json b/packages/cloud/package.json
index c472ee4415e97194dca0f7e31cee1ab75e7d7441..0f56f196f30882a1c3ba7dd646a69e5ed93297ce 100644
--- a/packages/cloud/package.json
+++ b/packages/cloud/package.json
@@ -42,6 +42,20 @@
         "types": "./reader/dist/index.d.ts",
         "default": "./reader/dist/index.js"
       }
+    },
+    ".": {
+      "require": {
+        "types": "./reader/dist/index.d.cts",
+        "default": "./reader/dist/index.cjs"
+      },
+      "import": {
+        "types": "./reader/dist/index.d.ts",
+        "default": "./reader/dist/index.js"
+      },
+      "default": {
+        "types": "./reader/dist/index.d.ts",
+        "default": "./reader/dist/index.js"
+      }
     }
   },
   "repository": {
diff --git a/packages/llamaindex/src/index.edge.ts b/packages/llamaindex/src/index.edge.ts
index 302c1fb913097bb4167571907f8acb1c54ab69fa..bd0b3cf4e366a74063a01d52ef737126284f2036 100644
--- a/packages/llamaindex/src/index.edge.ts
+++ b/packages/llamaindex/src/index.edge.ts
@@ -65,6 +65,7 @@ export * from "@llamaindex/core/storage/chat-store";
 export * from "@llamaindex/core/storage/doc-store";
 export * from "@llamaindex/core/storage/index-store";
 export * from "@llamaindex/core/storage/kv-store";
+export * from "@llamaindex/core/utils";
 export * from "./agent/index.js";
 export * from "./cloud/index.js";
 export * from "./embeddings/index.js";
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 96dff2a4c85ec8b9cf52f740b8a19fa307744c5d..35f006c8ce35f650d455b8f1a395cbc481d41165 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -538,9 +538,6 @@ importers:
       '@ai-sdk/openai':
         specifier: ^1.0.5
         version: 1.0.5(zod@3.24.1)
-      '@aws-crypto/sha256-js':
-        specifier: ^5.2.0
-        version: 5.2.0
       '@azure/cosmos':
         specifier: ^4.1.1
         version: 4.1.1
@@ -550,18 +547,90 @@ importers:
       '@azure/search-documents':
         specifier: ^12.1.0
         version: 12.1.0
-      '@datastax/astra-db-ts':
-        specifier: ^1.4.1
-        version: 1.4.1
-      '@llamaindex/core':
-        specifier: ^0.4.21
-        version: link:../packages/core
+      '@llamaindex/anthropic':
+        specifier: workspace:*
+        version: link:../packages/providers/anthropic
+      '@llamaindex/astra':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/astra
+      '@llamaindex/azure':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/azure
+      '@llamaindex/chroma':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/chroma
+      '@llamaindex/clip':
+        specifier: workspace:*
+        version: link:../packages/providers/clip
+      '@llamaindex/cloud':
+        specifier: workspace:*
+        version: link:../packages/cloud
+      '@llamaindex/cohere':
+        specifier: workspace:*
+        version: link:../packages/providers/cohere
+      '@llamaindex/deepinfra':
+        specifier: workspace:*
+        version: link:../packages/providers/deepinfra
+      '@llamaindex/google':
+        specifier: workspace:*
+        version: link:../packages/providers/google
+      '@llamaindex/groq':
+        specifier: workspace:*
+        version: link:../packages/providers/groq
+      '@llamaindex/huggingface':
+        specifier: workspace:*
+        version: link:../packages/providers/huggingface
+      '@llamaindex/milvus':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/milvus
+      '@llamaindex/mistral':
+        specifier: workspace:*
+        version: link:../packages/providers/mistral
+      '@llamaindex/mixedbread':
+        specifier: workspace:*
+        version: link:../packages/providers/mixedbread
+      '@llamaindex/mongodb':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/mongodb
+      '@llamaindex/node-parser':
+        specifier: workspace:*
+        version: link:../packages/node-parser
+      '@llamaindex/ollama':
+        specifier: workspace:*
+        version: link:../packages/providers/ollama
+      '@llamaindex/openai':
+        specifier: workspace:*
+        version: link:../packages/providers/openai
+      '@llamaindex/pinecone':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/pinecone
+      '@llamaindex/portkey-ai':
+        specifier: workspace:*
+        version: link:../packages/providers/portkey-ai
+      '@llamaindex/postgres':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/postgres
+      '@llamaindex/qdrant':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/qdrant
       '@llamaindex/readers':
-        specifier: ^1.0.23
+        specifier: workspace:*
         version: link:../packages/readers
+      '@llamaindex/replicate':
+        specifier: workspace:*
+        version: link:../packages/providers/replicate
+      '@llamaindex/upstash':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/upstash
       '@llamaindex/vercel':
         specifier: ^0.0.8
         version: link:../packages/providers/vercel
+      '@llamaindex/vllm':
+        specifier: workspace:*
+        version: link:../packages/providers/vllm
+      '@llamaindex/weaviate':
+        specifier: workspace:*
+        version: link:../packages/providers/storage/weaviate
       '@llamaindex/workflow':
         specifier: ^0.0.8
         version: link:../packages/workflow
@@ -574,18 +643,12 @@ importers:
       '@vercel/postgres':
         specifier: ^0.10.0
         version: 0.10.0
-      '@zilliz/milvus2-sdk-node':
-        specifier: ^2.4.6
-        version: 2.4.6
       ai:
        specifier: ^4.0.0
        version: 4.0.0(react@19.0.0-rc-5c56b873-20241107)(zod@3.24.1)
      ajv:
        specifier: ^8.17.1
        version: 8.17.1
-      chromadb:
-        specifier: ^1.8.1
-        version: 1.9.2(cohere-ai@7.14.0(@aws-sdk/client-sso-oidc@3.714.0(@aws-sdk/client-sts@3.714.0))(encoding@0.1.13))(encoding@0.1.13)(openai@4.73.1(encoding@0.1.13)(zod@3.24.1))
       commander:
         specifier: ^12.1.0
         version: 12.1.0
@@ -601,9 +664,6 @@ importers:
       mongodb:
         specifier: 6.7.0
         version: 6.7.0(@aws-sdk/credential-providers@3.714.0(@aws-sdk/client-sso-oidc@3.714.0(@aws-sdk/client-sts@3.714.0)))
-      pathe:
-        specifier: ^1.1.2
-        version: 1.1.2
       postgres:
         specifier: ^3.4.4
         version: 3.4.4
@@ -623,6 +683,9 @@ importers:
 
   examples/readers:
     dependencies:
+      '@llamaindex/cloud':
+        specifier: '*'
+        version: link:../../packages/cloud
       '@llamaindex/readers':
         specifier: '*'
         version: link:../../packages/readers
@@ -6142,21 +6205,6 @@ packages:
       voyageai:
         optional: true
 
-  chromadb@1.9.2:
-    resolution: {integrity: sha512-JNeLKlrsPxld7oPJCNeF73yHyyYeyP950enWRkTa6WsJ6UohH2NQ1vXZu6lWO9WuA9EMypITyZFZ8KtcTV3y2Q==}
-    engines: {node: '>=14.17.0'}
-    peerDependencies:
-      '@google/generative-ai': ^0.1.1
-      cohere-ai: ^5.0.0 || ^6.0.0 || ^7.0.0
-      openai: ^3.0.0 || ^4.0.0
-    peerDependenciesMeta:
-      '@google/generative-ai':
-        optional: true
-      cohere-ai:
-        optional: true
-      openai:
-        optional: true
-
   chrome-trace-event@1.0.3:
     resolution: {integrity: sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==}
     engines: {node: '>=6.0'}
@@ -17330,16 +17378,6 @@ snapshots:
       transitivePeerDependencies:
       - encoding
 
-  chromadb@1.9.2(cohere-ai@7.14.0(@aws-sdk/client-sso-oidc@3.714.0(@aws-sdk/client-sts@3.714.0))(encoding@0.1.13))(encoding@0.1.13)(openai@4.73.1(encoding@0.1.13)(zod@3.24.1)):
-    dependencies:
-      cliui: 8.0.1
-      isomorphic-fetch: 3.0.0(encoding@0.1.13)
-    optionalDependencies:
-      cohere-ai: 7.14.0(@aws-sdk/client-sso-oidc@3.714.0(@aws-sdk/client-sts@3.714.0))(encoding@0.1.13)
-      openai: 4.73.1(encoding@0.1.13)(zod@3.24.1)
-    transitivePeerDependencies:
-      - encoding
-
   chrome-trace-event@1.0.3: {}
 
   ci-info@3.8.0: {}