From 0fbf7b4ace642fc2057ffa35d96a2391ebded160 Mon Sep 17 00:00:00 2001
From: Logan Markewich <logan.markewich@live.com>
Date: Thu, 20 Jul 2023 19:04:01 -0600
Subject: [PATCH] update examples and docs

---
 apps/docs/docs/end_to_end.md        |  4 ++++
 apps/simple/chatEngine.ts           |  2 +-
 apps/simple/openai.ts               | 26 ++++++++++----------------
 apps/simple/subquestion.ts          |  2 +-
 apps/simple/vectorIndexCustomize.ts | 11 +++++++++--
 5 files changed, 25 insertions(+), 20 deletions(-)

diff --git a/apps/docs/docs/end_to_end.md b/apps/docs/docs/end_to_end.md
index d30ad1675..1b4ca4338 100644
--- a/apps/docs/docs/end_to_end.md
+++ b/apps/docs/docs/end_to_end.md
@@ -18,6 +18,10 @@ Create a list index and query it. This example also use the `LLMRetriever`, whic
 
 Create a vector index and query it. The vector index will use embeddings to fetch the top k most relevant nodes. By default, the top k is 2.
 
+## [Customized Vector Index](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/vectorIndexCustomize.ts)
+
+Create a vector index and query it, while also configuring the `LLM`, the `ServiceContext`, and the `similarity_top_k`.
+
 ## [OpenAI LLM](https://github.com/run-llama/LlamaIndexTS/blob/main/apps/simple/openai.ts)
 
 Create an OpenAI LLM and directly use it for chat. 
diff --git a/apps/simple/chatEngine.ts b/apps/simple/chatEngine.ts
index 97485b497..a79bfd88c 100644
--- a/apps/simple/chatEngine.ts
+++ b/apps/simple/chatEngine.ts
@@ -26,7 +26,7 @@ async function main() {
   while (true) {
     const query = await rl.question("Query: ");
     const response = await chatEngine.chat(query);
-    console.log(response);
+    console.log(response.toString());
   }
 }
 
diff --git a/apps/simple/openai.ts b/apps/simple/openai.ts
index 12e91f67a..f53709c64 100644
--- a/apps/simple/openai.ts
+++ b/apps/simple/openai.ts
@@ -1,19 +1,13 @@
-// @ts-ignore
-import process from "node:process";
-import { Configuration, OpenAIWrapper } from "llamaindex/src/llm/openai";
+import { OpenAI } from "llamaindex";
 
 (async () => {
-  const configuration = new Configuration({
-    apiKey: process.env.OPENAI_API_KEY,
-  });
-
-  const openai = new OpenAIWrapper(configuration);
-
-  const { data } = await openai.createChatCompletion({
-    model: "gpt-3.5-turbo-0613",
-    messages: [{ role: "user", content: "Hello, world!" }],
-  });
-
-  console.log(data);
-  console.log(data.choices[0].message);
+  const llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0.0 });
+  
+  // complete api
+  const response1 = await llm.complete("How are you?");
+  console.log(response1.message.content);
+
+  // chat api
+  const response2 = await llm.chat([{ content: "Tell me a joke!", role: "user" }]);
+  console.log(response2.message.content);
 })();
diff --git a/apps/simple/subquestion.ts b/apps/simple/subquestion.ts
index e386a296c..ff7c8225f 100644
--- a/apps/simple/subquestion.ts
+++ b/apps/simple/subquestion.ts
@@ -22,5 +22,5 @@ import essay from "./essay";
     "How was Paul Grahams life different before and after YC?"
   );
 
-  console.log(response);
+  console.log(response.toString());
 })();
diff --git a/apps/simple/vectorIndexCustomize.ts b/apps/simple/vectorIndexCustomize.ts
index 49d5ca4d8..aa7ebc6ae 100644
--- a/apps/simple/vectorIndexCustomize.ts
+++ b/apps/simple/vectorIndexCustomize.ts
@@ -1,14 +1,21 @@
-import { Document, VectorStoreIndex, RetrieverQueryEngine } from "llamaindex";
+import { Document, VectorStoreIndex, RetrieverQueryEngine, OpenAI, serviceContextFromDefaults } from "llamaindex";
 import essay from "./essay";
 
 // Customize retrieval and query args
 async function main() {
   const document = new Document({ text: essay });
-  const index = await VectorStoreIndex.fromDocuments([document]);
+
+  const serviceContext = serviceContextFromDefaults(
+    { llm: new OpenAI({ model: "gpt-3.5-turbo", temperature: 0.0 }) }
+  );
+
+  const index = await VectorStoreIndex.fromDocuments([document], undefined, serviceContext);
+  
   const retriever = index.asRetriever();
   retriever.similarityTopK = 5;
   // TODO: cannot pass responseSynthesizer into retriever query engine
   const queryEngine = new RetrieverQueryEngine(retriever);
+  
   const response = await queryEngine.query(
     "What did the author do growing up?"
   );
-- 
GitLab