Commit 844561b6 authored by Marcus Schiesser

chore: unified apps/simple and examples

parent 8a3ac0c3
Branches ms/move-examples
Showing with 0 additions and 2919 deletions
# simple
## 0.0.35
### Patch Changes
- Updated dependencies [3bab231]
- llamaindex@0.0.37
## 0.0.34
### Patch Changes
- Updated dependencies
- Updated dependencies
- Updated dependencies
- Updated dependencies
- Updated dependencies
- Updated dependencies
- llamaindex@0.0.36
## 0.0.33
### Patch Changes
- Updated dependencies [63f2108]
- llamaindex@0.0.35
## 0.0.32
### Patch Changes
- Updated dependencies [2a27e21]
- llamaindex@0.0.34
## 0.0.31
### Patch Changes
- Updated dependencies [5e2e92c]
- llamaindex@0.0.33
## 0.0.30
### Patch Changes
- Updated dependencies [90c0b83]
- Updated dependencies [dfd22aa]
- llamaindex@0.0.32
## 0.0.29
### Patch Changes
- Updated dependencies [6c55b2d]
- Updated dependencies [8aa8c65]
- Updated dependencies [6c55b2d]
- llamaindex@0.0.31
## 0.0.28
### Patch Changes
- Updated dependencies [139abad]
- Updated dependencies [139abad]
- Updated dependencies [eb0e994]
- Updated dependencies [eb0e994]
- Updated dependencies [139abad]
- llamaindex@0.0.30
## 0.0.27
### Patch Changes
- Updated dependencies [a52143b]
- Updated dependencies [1b7fd95]
- Updated dependencies [0db3f41]
- llamaindex@0.0.29
## 0.0.26
### Patch Changes
- Updated dependencies [96bb657]
- Updated dependencies [96bb657]
- Updated dependencies [837854d]
- llamaindex@0.0.28
## 0.0.25
### Patch Changes
- Updated dependencies [4a5591b]
- Updated dependencies [4a5591b]
- Updated dependencies [4a5591b]
- llamaindex@0.0.27
## 0.0.24
### Patch Changes
- Updated dependencies [5bb55bc]
- llamaindex@0.0.26
## 0.0.23
### Patch Changes
- Updated dependencies [e21eca2]
- Updated dependencies [40a8f07]
- Updated dependencies [40a8f07]
- llamaindex@0.0.25
## 0.0.22
### Patch Changes
- Updated dependencies [e4af7b3]
- Updated dependencies [259fe63]
- llamaindex@0.0.24
## 0.0.21
### Patch Changes
- Updated dependencies
- Updated dependencies [9d6b2ed]
- llamaindex@0.0.23
## 0.0.20
### Patch Changes
- Updated dependencies [454f3f8]
- Updated dependencies [454f3f8]
- Updated dependencies [454f3f8]
- Updated dependencies [454f3f8]
- Updated dependencies [99df58f]
- llamaindex@0.0.22
## 0.0.19
### Patch Changes
- Updated dependencies [f7a57ca]
- Updated dependencies [0a09de2]
- Updated dependencies [f7a57ca]
- llamaindex@0.0.21
## 0.0.18
### Patch Changes
- Updated dependencies [b526a2d]
- Updated dependencies [b526a2d]
- Updated dependencies [b526a2d]
- llamaindex@0.0.20
## 0.0.17
### Patch Changes
- Updated dependencies [a747f28]
- Updated dependencies [355910b]
- Updated dependencies [355910b]
- llamaindex@0.0.19
## 0.0.16
### Patch Changes
- Updated dependencies [824c13c]
- Updated dependencies [18b8915]
- Updated dependencies [ade9d8f]
- Updated dependencies [824c13c]
- llamaindex@0.0.18
## 0.0.15
### Patch Changes
- Updated dependencies [f80b062]
- Updated dependencies [b3fec86]
- Updated dependencies [b3fec86]
- llamaindex@0.0.17
## 0.0.14
### Patch Changes
- Updated dependencies [ec12633]
- Updated dependencies [9214b06]
- Updated dependencies [3316c6b]
- Updated dependencies [3316c6b]
- llamaindex@0.0.16
## 0.0.13
### Patch Changes
- Updated dependencies [b501eb5]
- Updated dependencies [f9d1a6e]
- llamaindex@0.0.15
## 0.0.12
### Patch Changes
- Updated dependencies [4ef334a]
- Updated dependencies [0af7773]
- Updated dependencies [bea4af9]
- Updated dependencies [4ef334a]
- llamaindex@0.0.14
## 0.0.11
### Patch Changes
- Updated dependencies [4f6f245]
- llamaindex@0.0.13
## 0.0.10
### Patch Changes
- Updated dependencies [68bdaaa]
- llamaindex@0.0.12
## 0.0.9
### Patch Changes
- Updated dependencies [fb7fb76]
- llamaindex@0.0.11
## 0.0.8
### Patch Changes
- Updated dependencies [6f2cb31]
- llamaindex@0.0.10
## 0.0.7
### Patch Changes
- Updated dependencies [02d9bb0]
- llamaindex@0.0.9
## 0.0.6
### Patch Changes
- Updated dependencies [ea5038e]
- llamaindex@0.0.8
## 0.0.5
### Patch Changes
- Updated dependencies [9fa6d4a]
- llamaindex@0.0.7
## 0.0.4
### Patch Changes
- Updated dependencies
- llamaindex@0.0.6
## 0.0.3
### Patch Changes
- Updated dependencies [5a765aa]
- llamaindex@0.0.5
## 0.0.2
### Patch Changes
- Updated dependencies [c65d671]
- llamaindex@0.0.4
## 0.0.1
### Patch Changes
- Updated dependencies [ca9410f]
- llamaindex@0.0.3
# Simple Examples
Due to the monorepo packaging, you will need to run these commands to get started:
```bash
pnpm install
pnpm --filter llamaindex build
```
Then run the examples with `ts-node`, for example: `npx ts-node vectorIndex.ts`.
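For reference, `vectorIndex.ts` builds a vector index over a local essay and queries it. The sketch below is based on the APIs the other examples in this folder use (`Document`, `VectorStoreIndex`, `asQueryEngine`); the actual file contents may differ:

```ts
import { Document, VectorStoreIndex } from "llamaindex";
import essay from "./essay";

async function main() {
  // Wrap the raw text in a Document and build a vector index over it
  const document = new Document({ text: essay });
  const index = await VectorStoreIndex.fromDocuments([document]);

  // Query the index and print the synthesized answer
  const queryEngine = index.asQueryEngine();
  const response = await queryEngine.query("What did the author do growing up?");
  console.log(response.toString());
}

main().catch(console.error);
```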
import { Anthropic } from "llamaindex";
(async () => {
const anthropic = new Anthropic();
const result = await anthropic.chat([
{ content: "You want to talk in rhymes.", role: "system" },
{
content:
"How much wood would a woodchuck chuck if a woodchuck could chuck wood?",
role: "user",
},
]);
console.log(result);
})();
import { stdin as input, stdout as output } from "node:process";
// readline/promises is still experimental so not in @types/node yet
// @ts-ignore
import readline from "node:readline/promises";
import {
ContextChatEngine,
Document,
serviceContextFromDefaults,
VectorStoreIndex,
} from "llamaindex";
import essay from "./essay";
async function main() {
const document = new Document({ text: essay });
const serviceContext = serviceContextFromDefaults({ chunkSize: 512 });
const index = await VectorStoreIndex.fromDocuments([document], {
serviceContext,
});
const retriever = index.asRetriever();
retriever.similarityTopK = 5;
const chatEngine = new ContextChatEngine({ retriever });
const rl = readline.createInterface({ input, output });
while (true) {
const query = await rl.question("Query: ");
const response = await chatEngine.chat(query);
console.log(response.toString());
}
}
main().catch(console.error);
import {
CompactAndRefine,
OpenAI,
PapaCSVReader,
ResponseSynthesizer,
serviceContextFromDefaults,
VectorStoreIndex,
} from "llamaindex";
async function main() {
// Load CSV
const reader = new PapaCSVReader();
const path = "data/titanic_train.csv";
const documents = await reader.loadData(path);
const serviceContext = serviceContextFromDefaults({
llm: new OpenAI({ model: "gpt-4" }),
});
// Split text and create embeddings. Store them in a VectorStoreIndex
const index = await VectorStoreIndex.fromDocuments(documents, {
serviceContext,
});
const csvPrompt = ({ context = "", query = "" }) => {
return `The following CSV file is loaded from ${path}
\`\`\`csv
${context}
\`\`\`
Given the CSV file, generate TypeScript code to answer the question: ${query}. You can use built-in Node.js functions but avoid third-party libraries.
`;
};
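// Use the custom CSV prompt when compacting and refining the response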
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new CompactAndRefine(serviceContext, csvPrompt),
});
const queryEngine = index.asQueryEngine({ responseSynthesizer });
// Query the index
const response = await queryEngine.query(
"What is the correlation between survival and age?",
);
// Output response
console.log(response.toString());
}
main().catch(console.error);
import { SimpleDirectoryReader } from "llamaindex";
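// The callback is invoked for each file while loading; returning false skips the file (PDFs here)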
function callback(
category: string,
name: string,
status: any,
message?: string,
): boolean {
console.log(category, name, status, message);
if (name.endsWith(".pdf")) {
console.log("I DON'T WANT PDF FILES!");
return false;
}
return true;
}
async function main() {
// Load page
const reader = new SimpleDirectoryReader(callback);
const params = { directoryPath: "./data" };
await reader.loadData(params);
}
main().catch(console.error);
import { stdin as input, stdout as output } from "node:process";
// readline/promises is still experimental so not in @types/node yet
// @ts-ignore
import readline from "node:readline/promises";
import { ChatMessage, LlamaDeuce, OpenAI } from "llamaindex";
(async () => {
const gpt4 = new OpenAI({ model: "gpt-4", temperature: 0.9 });
const l2 = new LlamaDeuce({
model: "Llama-2-70b-chat-4bit",
temperature: 0.9,
});
const rl = readline.createInterface({ input, output });
const start = await rl.question("Start: ");
const history: ChatMessage[] = [
{
content:
"Prefer shorter answers. Keep your response to 100 words or less.",
role: "system",
},
{ content: start, role: "user" },
];
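// Alternate turns: GPT-4 replies when the history length is odd, Llama 2 when it is even.
// Roles are stored from Llama's perspective and flipped for GPT-4, so each model sees
// its own earlier replies as "assistant" and the other model's as "user".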
while (true) {
const next = history.length % 2 === 1 ? gpt4 : l2;
const r = await next.chat(
history.map(({ content, role }) => ({
content,
role: next === l2 ? role : role === "user" ? "assistant" : "user",
})),
);
history.push({
content: r.message.content,
role: next === l2 ? "assistant" : "user",
});
await rl.question((next === l2 ? "Llama: " : "GPT: ") + r.message.content);
}
})();
import { HTMLReader, VectorStoreIndex } from "llamaindex";
async function main() {
// Load page
const reader = new HTMLReader();
const documents = await reader.loadData("data/18-1_Changelog.html");
// Split text and create embeddings. Store them in a VectorStoreIndex
const index = await VectorStoreIndex.fromDocuments(documents);
// Query the index
const queryEngine = index.asQueryEngine();
const response = await queryEngine.query(
"What were the notable changes in 18.1?",
);
// Output response
console.log(response.toString());
}
main().catch(console.error);
import {
Document,
KeywordTableIndex,
KeywordTableRetrieverMode,
} from "llamaindex";
import essay from "./essay";
async function main() {
const document = new Document({ text: essay, id_: "essay" });
const index = await KeywordTableIndex.fromDocuments([document]);
const allModes: KeywordTableRetrieverMode[] = [
KeywordTableRetrieverMode.DEFAULT,
KeywordTableRetrieverMode.SIMPLE,
KeywordTableRetrieverMode.RAKE,
];
// Use for...of instead of forEach so each async query is awaited in order
// and errors propagate to the catch handler below
for (const mode of allModes) {
const queryEngine = index.asQueryEngine({
retriever: index.asRetriever({
mode,
}),
});
const response = await queryEngine.query(
"What did the author do growing up?",
);
console.log(response.toString());
}
}
main().catch((e: Error) => {
console.error(e, e.stack);
});
import { DeuceChatStrategy, LlamaDeuce } from "llamaindex";
(async () => {
const deuce = new LlamaDeuce({ chatStrategy: DeuceChatStrategy.META });
const result = await deuce.chat([{ content: "Hello, world!", role: "user" }]);
console.log(result);
})();
import {
Document,
NodeWithScore,
ResponseSynthesizer,
SimpleNodeParser,
TextNode,
} from "llamaindex";
(async () => {
const nodeParser = new SimpleNodeParser();
const nodes = nodeParser.getNodesFromDocuments([
new Document({ text: "I am 10 years old. John is 20 years old." }),
]);
console.log(nodes);
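// Build a response directly from hand-scored nodes, without an index or retriever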
const responseSynthesizer = new ResponseSynthesizer();
const nodesWithScore: NodeWithScore[] = [
{
node: new TextNode({ text: "I am 10 years old." }),
score: 1,
},
{
node: new TextNode({ text: "John is 20 years old." }),
score: 0.5,
},
];
const response = await responseSynthesizer.synthesize(
"What age am I?",
nodesWithScore,
);
console.log(response.response);
})();
import { MarkdownReader, VectorStoreIndex } from "llamaindex";
async function main() {
// Load Markdown file
const reader = new MarkdownReader();
const documents = await reader.loadData("node_modules/llamaindex/README.md");
// Split text and create embeddings. Store them in a VectorStoreIndex
const index = await VectorStoreIndex.fromDocuments(documents);
// Query the index
const queryEngine = index.asQueryEngine();
const response = await queryEngine.query("What does the example code do?");
// Output response
console.log(response.toString());
}
main().catch(console.error);
import { MongoClient } from "mongodb";
import { Document, SimpleMongoReader, VectorStoreIndex } from "llamaindex";
import { stdin as input, stdout as output } from "node:process";
import readline from "node:readline/promises";
async function main() {
//Dummy test code
const query: object = { _id: "waldo" };
const options: object = {};
const projections: object = { embedding: 0 };
const limit: number = Infinity;
const uri: string = process.env.MONGODB_URI ?? "fake_uri";
const client: MongoClient = new MongoClient(uri);
//Where the real code starts
const MR = new SimpleMongoReader(client);
const documents: Document[] = await MR.loadData(
"data",
"posts",
1,
{},
options,
projections,
);
//
//If you need to look at low-level details of
// a queryEngine (for example, needing to check each individual node)
//
// Split text and create embeddings. Store them in a VectorStoreIndex
// var storageContext = await storageContextFromDefaults({});
// var serviceContext = serviceContextFromDefaults({});
// const docStore = storageContext.docStore;
// for (const doc of documents) {
// docStore.setDocumentHash(doc.id_, doc.hash);
// }
// const nodes = serviceContext.nodeParser.getNodesFromDocuments(documents);
// console.log(nodes);
//
//Making Vector Store from documents
//
const index = await VectorStoreIndex.fromDocuments(documents);
// Create query engine
const queryEngine = index.asQueryEngine();
const rl = readline.createInterface({ input, output });
while (true) {
const query = await rl.question("Query: ");
if (!query) {
break;
}
const response = await queryEngine.query(query);
// Output response
console.log(response.toString());
}
}
main().catch(console.error);
import { Client } from "@notionhq/client";
import { program } from "commander";
import { NotionReader, VectorStoreIndex } from "llamaindex";
import { stdin as input, stdout as output } from "node:process";
// readline/promises is still experimental so not in @types/node yet
// @ts-ignore
import readline from "node:readline/promises";
program
.argument("[page]", "Notion page id (if omitted, available pages are listed)")
.action(async (page, _options, command) => {
// Initializing a client
if (!process.env.NOTION_TOKEN) {
console.log(
"No NOTION_TOKEN found in environment variables. You will need to register an integration https://www.notion.com/my-integrations and put it in your NOTION_TOKEN environment variable.",
);
return;
}
const notion = new Client({
auth: process.env.NOTION_TOKEN,
});
if (!page) {
const response = await notion.search({
filter: {
value: "page",
property: "object",
},
sort: {
direction: "descending",
timestamp: "last_edited_time",
},
});
const { results } = response;
if (results.length === 0) {
console.log(
"No pages found. You will need to share it with your integration. (tap the three dots on the top right, find Add connections, and add your integration)",
);
return;
} else {
const pages = results
.map((result) => {
if (!("url" in result)) {
return null;
}
return {
id: result.id,
url: result.url,
};
})
.filter((page) => page !== null);
console.log("Found pages:");
console.table(pages);
console.log(`Then run: npx ts-node ${command.name()} [page id]`);
return;
}
}
const reader = new NotionReader({ client: notion });
const documents = await reader.loadData(page);
console.log(documents);
// Split text and create embeddings. Store them in a VectorStoreIndex
const index = await VectorStoreIndex.fromDocuments(documents);
// Create query engine
const queryEngine = index.asQueryEngine();
const rl = readline.createInterface({ input, output });
while (true) {
const query = await rl.question("Query: ");
if (!query) {
break;
}
const response = await queryEngine.query(query);
// Output response
console.log(response.toString());
}
});
program.parse();
import { OpenAI } from "llamaindex";
(async () => {
const llm = new OpenAI({ model: "gpt-4-1106-preview", temperature: 0.1 });
// complete api
const response1 = await llm.complete("How are you?");
console.log(response1.message.content);
// chat api
const response2 = await llm.chat([
{ content: "Tell me a joke.", role: "user" },
]);
console.log(response2.message.content);
})();
{
"version": "0.0.35",
"private": true,
"name": "simple",
"dependencies": {
"@notionhq/client": "^2.2.13",
"@pinecone-database/pinecone": "^1.1.2",
"commander": "^11.1.0",
"llamaindex": "workspace:*"
},
"devDependencies": {
"@types/node": "^18.18.6",
"ts-node": "^10.9.1"
},
"scripts": {
"lint": "eslint ."
}
}