From ab0d666f0376f183446898f35a88000fb336bd2f Mon Sep 17 00:00:00 2001
From: Elliot Kang <kkang2097@gmail.com>
Date: Sat, 30 Sep 2023 11:46:54 -0700
Subject: [PATCH] fixed imports, moved llmStream example
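
Import ChatMessage, OpenAI, and SimpleChatEngine from the published
"llamaindex" package instead of reaching into packages/core/src, drop
the tiktoken-based token counting for now (left as a TODO for a later
callbacks change), and replace the single hard-coded chat call with an
interactive readline loop that streams responses until the user
submits an empty query. The example should be runnable with something
like `npx ts-node apps/simple/llmStream.ts` (assuming a ts-node setup).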

---
 {examples => apps/simple}/llmStream.ts | 37 ++++++++++++--------------
 1 file changed, 17 insertions(+), 20 deletions(-)
 rename {examples => apps/simple}/llmStream.ts (53%)

diff --git a/examples/llmStream.ts b/apps/simple/llmStream.ts
similarity index 53%
rename from examples/llmStream.ts
rename to apps/simple/llmStream.ts
index ffaa495b6..4cf0ff366 100644
--- a/examples/llmStream.ts
+++ b/apps/simple/llmStream.ts
@@ -1,6 +1,6 @@
-import * as tiktoken from "tiktoken-node";
-import { ChatMessage, OpenAI } from "../packages/core/src/llm/LLM";
-import {SimpleChatEngine } from "../packages/core/src/ChatEngine";
+import { ChatMessage, OpenAI, SimpleChatEngine } from "llamaindex";
+import { stdin as input, stdout as output } from "node:process";
+import readline from "node:readline/promises";
 
 async function main() {
   const query: string = `
@@ -13,9 +13,7 @@ Where is Istanbul?
   var accumulated_result: string = "";
   var total_tokens: number = 0;
 
-  //Callback stuff, like logging token usage.
-  //GPT 3.5 Turbo uses CL100K_Base encodings, check your LLM to see which tokenizer it uses.
-  const encoding = tiktoken.getEncoding("cl100k_base");
+  // TODO: Add callbacks (e.g., token-usage logging) later.
 
   //Stream Complete
   //Note: Setting streaming flag to true or false will auto-set your return type to
@@ -31,25 +29,24 @@ Where is Istanbul?
     accumulated_result += part;
   }
 
-  const correct_total_tokens: number =
-    encoding.encode(accumulated_result).length;
-
-  console.log(accumulated_result);
-  //Check if our stream token counter works
-  console.log(
-    `Output token total using tokenizer on accumulated output: ${correct_total_tokens}`,
-  );
-
 
   accumulated_result = "";
   const chatEngine: SimpleChatEngine = new SimpleChatEngine();
-  const chatStream = await chatEngine.chat(query, undefined, true);
-    for await (const part of chatStream){
-      console.log(part);
-      accumulated_result += part;
+
+  const rl = readline.createInterface({ input, output });
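+  // Keep prompting until the user submits an empty query.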
+  while (true) {
+    const query = await rl.question("Query: ");
+
+    if (!query) {
+      break;
     }
 
-  console.log(accumulated_result);
+    const chatStream = await chatEngine.chat(query, undefined, true);
+    for await (const part of chatStream) {
+      process.stdout.write(part); // write chunks without extra newlines
+    }
+  }
 }
 
 main();
-- 
GitLab