diff --git a/examples/mistral.ts b/examples/mistral.ts
index 4687add02b4fae2732372078713a33e2b6484834..25911950415961b0eebacafea7fa4d5b7530af6f 100644
--- a/examples/mistral.ts
+++ b/examples/mistral.ts
@@ -1,4 +1,35 @@
-import { MistralAI, MistralAIEmbedding } from "llamaindex";
+import * as fs from "fs/promises";
+import {
+  BaseEmbedding,
+  Document,
+  LLM,
+  MistralAI,
+  MistralAIEmbedding,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+async function rag(llm: LLM, embedModel: BaseEmbedding, query: string) {
+  // Load essay from abramov.txt in Node
+  const path = "node_modules/llamaindex/examples/abramov.txt";
+
+  const essay = await fs.readFile(path, "utf-8");
+
+  // Create Document object with essay
+  const document = new Document({ text: essay, id_: path });
+
+  // Split text and create embeddings. Store them in a VectorStoreIndex
+  const serviceContext = serviceContextFromDefaults({ llm, embedModel });
+
+  const index = await VectorStoreIndex.fromDocuments([document], {
+    serviceContext,
+  });
+
+  // Query the index
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(query);
+  return response.response;
+}
 
 (async () => {
   // embeddings
@@ -26,4 +57,12 @@ import { MistralAI, MistralAIEmbedding } from "llamaindex";
   for await (const chunk of stream) {
     process.stdout.write(chunk);
   }
+
+  // rag
+  const ragResponse = await rag(
+    llm,
+    embedding,
+    "What did the author do in college?",
+  );
+  console.log(ragResponse);
 })();
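
For context only (not part of the diff): the second hunk references `llm` and `embedding` variables that are defined in unchanged lines of examples/mistral.ts not shown here. The sketch below shows one plausible way the new `rag` helper gets driven end to end; the constructor options (`model: "mistral-tiny"`) and the reliance on a `MISTRAL_API_KEY` environment variable are assumptions about that surrounding code, not part of this change.

```ts
import { MistralAI, MistralAIEmbedding } from "llamaindex";

(async () => {
  // Assumption: the unchanged portion of the example constructs the
  // clients roughly like this; option names may vary by package version.
  // MistralAIEmbedding/MistralAI read MISTRAL_API_KEY from the environment.
  const embedding = new MistralAIEmbedding();
  const llm = new MistralAI({ model: "mistral-tiny" }); // assumed model option

  // rag() is the helper added by this diff (same file): it indexes the
  // essay into a VectorStoreIndex, then answers the query against it.
  const ragResponse = await rag(
    llm,
    embedding,
    "What did the author do in college?",
  );
  console.log(ragResponse);
})();
```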