From afc051686850569b2f50aa2493332b69d1dc4d00 Mon Sep 17 00:00:00 2001
From: Marcus Schiesser <mail@marcusschiesser.de>
Date: Mon, 18 Dec 2023 14:52:37 +0700
Subject: [PATCH] add RAG to mistral example
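
Extend examples/mistral.ts with a rag() helper that loads the bundled
abramov.txt essay, wraps it in a Document, builds a VectorStoreIndex
from it (via a service context that uses MistralAI as the LLM and
MistralAIEmbedding as the embedding model), and runs a query through
the index's query engine. The llm and embedding instances the example
already creates are reused for the RAG call.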

---
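Notes: the new rag() helper needs a Mistral API key at runtime. As a
rough sketch of how to try it out, assuming the key is picked up from
the MISTRAL_API_KEY environment variable and ts-node is available:

  MISTRAL_API_KEY=<your-key> npx ts-node examples/mistral.ts

The essay is loaded via a relative path into node_modules, so run the
command from a directory that has llamaindex installed.
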
 examples/mistral.ts | 41 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 40 insertions(+), 1 deletion(-)

diff --git a/examples/mistral.ts b/examples/mistral.ts
index 4687add02..259119504 100644
--- a/examples/mistral.ts
+++ b/examples/mistral.ts
@@ -1,4 +1,35 @@
-import { MistralAI, MistralAIEmbedding } from "llamaindex";
+import * as fs from "fs/promises";
+import {
+  BaseEmbedding,
+  Document,
+  LLM,
+  MistralAI,
+  MistralAIEmbedding,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+async function rag(llm: LLM, embedModel: BaseEmbedding, query: string) {
+  // Load essay from abramov.txt in Node
+  const path = "node_modules/llamaindex/examples/abramov.txt";
+
+  const essay = await fs.readFile(path, "utf-8");
+
+  // Create Document object with essay
+  const document = new Document({ text: essay, id_: path });
+
+  // Split text and create embeddings. Store them in a VectorStoreIndex
+  const serviceContext = serviceContextFromDefaults({ llm, embedModel });
+
+  const index = await VectorStoreIndex.fromDocuments([document], {
+    serviceContext,
+  });
+
+  // Query the index and return the generated answer text
+  const queryEngine = index.asQueryEngine();
+  const response = await queryEngine.query(query);
+  return response.response;
+}
 
 (async () => {
   // embeddings
@@ -26,4 +57,12 @@ import { MistralAI, MistralAIEmbedding } from "llamaindex";
   for await (const chunk of stream) {
     process.stdout.write(chunk);
   }
+
+  // rag
+  const ragResponse = await rag(
+    llm,
+    embedding,
+    "What did the author do in college?",
+  );
+  console.log(ragResponse);
 })();
-- 
GitLab