Commit ab0d666f authored by Elliot Kang

fixed imports, moved llmStream example

parent 30add7a7
-import * as tiktoken from "tiktoken-node";
-import { ChatMessage, OpenAI } from "../packages/core/src/llm/LLM";
-import { SimpleChatEngine } from "../packages/core/src/ChatEngine";
+import { ChatMessage, OpenAI, SimpleChatEngine } from "llamaindex";
+import { stdin as input, stdout as output } from "node:process";
+import readline from "node:readline/promises";
 async function main() {
   const query: string = `
@@ -13,9 +13,7 @@ Where is Istanbul?
   var accumulated_result: string = "";
   var total_tokens: number = 0;
-  //Callback stuff, like logging token usage.
-  //GPT 3.5 Turbo uses CL100K_Base encodings, check your LLM to see which tokenizer it uses.
-  const encoding = tiktoken.getEncoding("cl100k_base");
+  //TODO: Add callbacks later
   //Stream Complete
   //Note: Setting streaming flag to true or false will auto-set your return type to
@@ -31,25 +29,24 @@ Where is Istanbul?
     accumulated_result += part;
   }
-  const correct_total_tokens: number =
-    encoding.encode(accumulated_result).length;
-  console.log(accumulated_result);
-  //Check if our stream token counter works
-  console.log(
-    `Output token total using tokenizer on accumulated output: ${correct_total_tokens}`,
-  );
   accumulated_result = "";
   const chatEngine: SimpleChatEngine = new SimpleChatEngine();
-  const chatStream = await chatEngine.chat(query, undefined, true);
-  for await (const part of chatStream) {
-    console.log(part);
-    accumulated_result += part;
-  }
-  console.log(accumulated_result);
+  const rl = readline.createInterface({ input, output });
+  while (true) {
+    const query = await rl.question("Query: ");
+    if (!query) {
+      break;
+    }
+    const chatStream = await chatEngine.chat(query, undefined, true);
+    for await (const part of chatStream) {
+      process.stdout.write(part);
+      // accumulated_result += part;
+    }
+  }
 }
 main();
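For context, here is a minimal sketch of the interactive streaming loop as it stands after this commit, assembled from the added lines above. It assumes the llamaindex API used in this diff (a default-constructed SimpleChatEngine whose chat(query, undefined, true) call returns an async iterable of text chunks when the streaming flag is true) and an OpenAI API key in the environment; the rl.close() call and the trailing newline write are small additions that are not part of the diff.

import { SimpleChatEngine } from "llamaindex";
import { stdin as input, stdout as output } from "node:process";
import readline from "node:readline/promises";

async function main() {
  // Default SimpleChatEngine, constructed exactly as in the diff above.
  const chatEngine = new SimpleChatEngine();
  const rl = readline.createInterface({ input, output });

  // Read queries from stdin until an empty line is entered.
  while (true) {
    const query = await rl.question("Query: ");
    if (!query) {
      break;
    }
    // Passing true as the third argument requests a streaming response,
    // consumed here as an async iterable of text chunks.
    const chatStream = await chatEngine.chat(query, undefined, true);
    for await (const part of chatStream) {
      process.stdout.write(part);
    }
    process.stdout.write("\n"); // not in the diff: end the streamed answer with a newline
  }
  rl.close(); // not in the diff: release the readline interface
}

main();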