-
Yi Ding authored
Allows our scripts to be run repeatedly with vector store backing and persistence without throwing an error. This is rudimentary support which doesn't work if the document has changed over time. Also added the insert and delete functions so that documents can be added manually.
Yi Ding authored: Allows our scripts to be run repeatedly with vector store backing and persistence without throwing an error. This is rudimentary support which doesn't work if the document has changed over time. Also added the insert and delete functions so that documents can be added manually.
Code owners
Assign users and groups as approvers for specific file changes. Learn more.
gptllama.ts 1.17 KiB
import { stdin as input, stdout as output } from "node:process";
// readline/promises is still experimental so not in @types/node yet
// @ts-ignore
import readline from "node:readline/promises";
import { ChatMessage, LlamaDeuce, OpenAI } from "llamaindex";
(async () => {
  // Two LLMs talk to each other: GPT-4 and Llama 2. The human only seeds the
  // conversation with an opening line and presses Enter to advance each turn.
  const gpt4 = new OpenAI({ model: "gpt-4", temperature: 0.9 });
  const l2 = new LlamaDeuce({
    model: "Llama-2-70b-chat-4bit",
    temperature: 0.9,
  });

  const rl = readline.createInterface({ input, output });
  const start = await rl.question("Start: ");

  // History is stored from Llama's perspective:
  //   "user"      = messages produced by GPT-4 (plus the human's opener)
  //   "assistant" = messages produced by Llama 2
  const history: ChatMessage[] = [
    {
      content:
        "Prefer shorter answers. Keep your response to 100 words or less.",
      role: "system",
    },
    { content: start, role: "user" },
  ];

  while (true) {
    // After the system+user seed, history.length is even, so Llama speaks
    // first; the two models then strictly alternate.
    const next = history.length % 2 === 1 ? gpt4 : l2;
    const r = await next.chat(
      history.map(({ content, role }) => ({
        content,
        // For GPT the user/assistant roles must be flipped, since each model
        // sees the other's messages as "user". Fix: keep "system" as "system"
        // instead of demoting the system prompt to an ordinary user message
        // (the old ternary turned every non-"user" role into "user").
        role:
          next === l2 || role === "system"
            ? role
            : role === "user"
              ? "assistant"
              : "user",
      })),
    );
    history.push({
      content: r.message.content,
      role: next === l2 ? "assistant" : "user",
    });
    // Pause until the operator presses Enter before running the next turn.
    await rl.question((next === l2 ? "Llama: " : "GPT: ") + r.message.content);
  }
})();