Commit 844561b6 authored by Marcus Schiesser

chore: unified apps/simple and examples

parent 8a3ac0c3
Showing with 39 additions and 592 deletions
import { PDFReader, VectorStoreIndex } from "llamaindex";
async function main() {
// Load PDF
const reader = new PDFReader();
const documents = await reader.loadData("data/brk-2022.pdf");
// Split text and create embeddings. Store them in a VectorStoreIndex
const index = await VectorStoreIndex.fromDocuments(documents);
// Query the index
const queryEngine = index.asQueryEngine();
const response = await queryEngine.query("What mistakes did they make?");
// Output response
console.log(response.toString());
}
main().catch(console.error);
import { Portkey } from "llamaindex";
(async () => {
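// Configure Portkey in single-LLM mode, routing requests to a Llama 2 chat model hosted on Anyscale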
const portkey = new Portkey({
mode: "single",
llms: [
{
provider: "anyscale",
virtual_key: "anyscale-3b3c04",
model: "meta-llama/Llama-2-13b-chat-hf",
max_tokens: 2000,
},
],
});
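// Stream the chat response and print each token as it arrives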
const result = portkey.stream_chat([
{ role: "system", content: "You are a helpful assistant." },
{ role: "user", content: "Tell me a joke." },
]);
for await (const res of result) {
process.stdout.write(res);
}
})();
import fs from "node:fs/promises";
import { SentenceSplitter } from "llamaindex";
async function main() {
const path = "node_modules/llamaindex/examples/abramov.txt";
const essay = await fs.readFile(path, "utf-8");
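// Split the essay into sentence-based chunks, keeping the overlapping text between neighboring chunks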
const textSplitter = new SentenceSplitter();
const chunks = textSplitter.splitTextWithOverlaps(essay);
console.log(chunks);
}
main().catch(console.error);
import {
Document,
storageContextFromDefaults,
VectorStoreIndex,
} from "llamaindex";
import essay from "./essay";
async function main() {
// Create Document object with essay
const document = new Document({ text: essay, id_: "essay" });
// Split text and create embeddings. Store them in a VectorStoreIndex
// persist the vector store automatically with the storage context
const storageContext = await storageContextFromDefaults({
persistDir: "./storage",
});
const index = await VectorStoreIndex.fromDocuments([document], {
storageContext,
});
// Query the index
const queryEngine = index.asQueryEngine();
const response = await queryEngine.query(
"What did the author do in college?",
);
// Output response
console.log(response.toString());
// load the index
const secondStorageContext = await storageContextFromDefaults({
persistDir: "./storage",
});
const loadedIndex = await VectorStoreIndex.init({
storageContext: secondStorageContext,
});
const loadedQueryEngine = loadedIndex.asQueryEngine();
const loadedResponse = await loadedQueryEngine.query(
"What did the author do growing up?",
);
console.log(loadedResponse.toString());
}
main().catch(console.error);
import { Document, SubQuestionQueryEngine, VectorStoreIndex } from "llamaindex";
import essay from "./essay";
(async () => {
const document = new Document({ text: essay, id_: "essay" });
const index = await VectorStoreIndex.fromDocuments([document]);
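// Create a query engine that decomposes the question into sub-questions and answers each against the essay index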
const queryEngine = SubQuestionQueryEngine.fromDefaults({
queryEngineTools: [
{
queryEngine: index.asQueryEngine(),
metadata: {
name: "pg_essay",
description: "Paul Graham essay on What I Worked On",
},
},
],
});
const response = await queryEngine.query(
"How was Paul Grahams life different before and after YC?",
);
console.log(response.toString());
})();
import {
Document,
SimpleNodeParser,
SummaryIndex,
SummaryRetrieverMode,
serviceContextFromDefaults,
} from "llamaindex";
import essay from "./essay";
async function main() {
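// Use a deliberately small chunk size so the essay is parsed into many small nodes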
const serviceContext = serviceContextFromDefaults({
nodeParser: new SimpleNodeParser({
chunkSize: 40,
}),
});
const document = new Document({ text: essay, id_: "essay" });
const index = await SummaryIndex.fromDocuments([document], {
serviceContext,
});
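// Use the LLM retriever mode so the LLM decides which summary nodes are relevant to the query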
const queryEngine = index.asQueryEngine({
retriever: index.asRetriever({ mode: SummaryRetrieverMode.LLM }),
});
const response = await queryEngine.query(
"What did the author do growing up?",
);
console.log(response.toString());
}
main().catch((e: Error) => {
console.error(e, e.stack);
});
import fs from "node:fs/promises";
import { Document, VectorStoreIndex } from "llamaindex";
async function main() {
// Load essay from abramov.txt in Node
const path = "node_modules/llamaindex/examples/abramov.txt";
const essay = await fs.readFile(path, "utf-8");
// Create Document object with essay
const document = new Document({ text: essay, id_: path });
// Split text and create embeddings. Store them in a VectorStoreIndex
const index = await VectorStoreIndex.fromDocuments([document]);
// Query the index
const queryEngine = index.asQueryEngine();
const response = await queryEngine.query(
"What did the author do in college?",
);
// Output response
console.log(response.toString());
}
main().catch(console.error);
import fs from "node:fs/promises";
import {
Anthropic,
anthropicTextQaPrompt,
CompactAndRefine,
Document,
ResponseSynthesizer,
serviceContextFromDefaults,
VectorStoreIndex,
} from "llamaindex";
async function main() {
// Load essay from abramov.txt in Node
const path = "node_modules/llamaindex/examples/abramov.txt";
const essay = await fs.readFile(path, "utf-8");
// Create Document object with essay
const document = new Document({ text: essay, id_: path });
// Split text and create embeddings. Store them in a VectorStoreIndex
const serviceContext = serviceContextFromDefaults({ llm: new Anthropic() });
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new CompactAndRefine(
serviceContext,
anthropicTextQaPrompt,
),
});
const index = await VectorStoreIndex.fromDocuments([document], {
serviceContext,
});
// Query the index
const queryEngine = index.asQueryEngine({ responseSynthesizer });
const response = await queryEngine.query(
"What did the author do in college?",
);
// Output response
console.log(response.toString());
}
main().catch(console.error);
import {
Document,
OpenAI,
RetrieverQueryEngine,
serviceContextFromDefaults,
SimilarityPostprocessor,
VectorStoreIndex,
} from "llamaindex";
import essay from "./essay";
// Customize retrieval and query args
async function main() {
const document = new Document({ text: essay, id_: "essay" });
const serviceContext = serviceContextFromDefaults({
llm: new OpenAI({ model: "gpt-3.5-turbo", temperature: 0.1 }),
});
const index = await VectorStoreIndex.fromDocuments([document], {
serviceContext,
});
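// Return the 5 most similar nodes and drop any result below a 0.7 similarity score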
const retriever = index.asRetriever();
retriever.similarityTopK = 5;
const nodePostprocessor = new SimilarityPostprocessor({
similarityCutoff: 0.7,
});
// TODO: cannot pass responseSynthesizer into retriever query engine
const queryEngine = new RetrieverQueryEngine(
retriever,
undefined,
undefined,
[nodePostprocessor],
);
const response = await queryEngine.query(
"What did the author do growing up?",
);
console.log(response.response);
}
main().catch(console.error);
import {
OpenAI,
ResponseSynthesizer,
RetrieverQueryEngine,
serviceContextFromDefaults,
TextNode,
TreeSummarize,
VectorIndexRetriever,
VectorStore,
VectorStoreIndex,
VectorStoreQuery,
VectorStoreQueryResult,
} from "llamaindex";
import { Index, Pinecone, RecordMetadata } from "@pinecone-database/pinecone";
/**
* Please do not use this class in production; it's only for demonstration purposes.
*/
class PineconeVectorStore<T extends RecordMetadata = RecordMetadata>
implements VectorStore
{
storesText = true;
isEmbeddingQuery = false;
indexName!: string;
pineconeClient!: Pinecone;
index!: Index<T>;
constructor({ indexName, client }: { indexName: string; client: Pinecone }) {
this.indexName = indexName;
this.pineconeClient = client;
this.index = client.index<T>(indexName);
}
client() {
return this.pineconeClient;
}
async query(
query: VectorStoreQuery,
kwargs?: any,
): Promise<VectorStoreQueryResult> {
let queryEmbedding: number[] = [];
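// If an alpha weight is provided (e.g. for hybrid search), scale the dense query embedding by it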
if (query.queryEmbedding) {
if (typeof query.alpha === "number") {
const alpha = query.alpha;
queryEmbedding = query.queryEmbedding.map((v) => v * alpha);
} else {
queryEmbedding = query.queryEmbedding;
}
}
// The current LlamaIndexTS implementation only supports exact-match filters, so we pass the filter through kwargs instead.
const filter = kwargs?.filter || {};
const response = await this.index.query({
filter,
vector: queryEmbedding,
topK: query.similarityTopK,
includeValues: true,
includeMetadata: true,
});
console.log(
`Number of vectors returned by Pinecone after preFilters are applied: ${
response?.matches?.length || 0
}.`,
);
const topKIds: string[] = [];
const topKNodes: TextNode[] = [];
const topKScores: number[] = [];
const metadataToNode = (metadata?: T): Partial<TextNode> => {
if (!metadata) {
throw new Error("metadata is undefined.");
}
const nodeContent = metadata["_node_content"];
if (!nodeContent) {
throw new Error("nodeContent is undefined.");
}
if (typeof nodeContent !== "string") {
throw new Error("nodeContent is not a string.");
}
return JSON.parse(nodeContent);
};
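// Rebuild a TextNode from each Pinecone match, keeping its id, embedding, and score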
if (response.matches) {
for (const match of response.matches) {
const node = new TextNode({
...metadataToNode(match.metadata),
embedding: match.values,
});
topKIds.push(match.id);
topKNodes.push(node);
topKScores.push(match.score ?? 0);
}
}
const result = {
ids: topKIds,
nodes: topKNodes,
similarities: topKScores,
};
return result;
}
add(): Promise<string[]> {
return Promise.resolve([]);
}
delete(): Promise<void> {
throw new Error("Method `delete` not implemented.");
}
persist(): Promise<void> {
throw new Error("Method `persist` not implemented.");
}
}
/**
 * The goal of this example is to show how to use Pinecone as a vector store
 * for LlamaIndexTS, with and without preFilters.
 *
 * Do not use it like this in production;
 * write a proper PineconeVectorStore implementation instead.
 */
async function main() {
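// Replace these placeholder values with your own credentials before running the example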
process.env.PINECONE_API_KEY = "Your Pinecone API Key.";
process.env.PINECONE_ENVIRONMENT = "Your Pinecone Environment.";
process.env.PINECONE_PROJECT_ID = "Your Pinecone Project ID.";
process.env.PINECONE_INDEX_NAME = "Your Pinecone Index Name.";
process.env.OPENAI_API_KEY = "Your OpenAI API Key.";
process.env.OPENAI_API_ORGANIZATION = "Your OpenAI API Organization.";
const getPineconeVectorStore = async () => {
return new PineconeVectorStore({
indexName: process.env.PINECONE_INDEX_NAME || "index-name",
client: new Pinecone(),
});
};
const getServiceContext = () => {
const openAI = new OpenAI({
model: "gpt-4",
apiKey: process.env.OPENAI_API_KEY,
});
return serviceContextFromDefaults({
llm: openAI,
});
};
const getQueryEngine = async (filter: unknown) => {
const vectorStore = await getPineconeVectorStore();
const serviceContext = getServiceContext();
const vectorStoreIndex = await VectorStoreIndex.fromVectorStore(
vectorStore,
serviceContext,
);
const retriever = new VectorIndexRetriever({
index: vectorStoreIndex,
similarityTopK: 500,
});
const responseSynthesizer = new ResponseSynthesizer({
serviceContext,
responseBuilder: new TreeSummarize(serviceContext),
});
return new RetrieverQueryEngine(retriever, responseSynthesizer, {
filter,
});
};
// `whatever` stands for any numeric metadata key you want to filter on
const queryEngine = await getQueryEngine({
whatever: {
$gte: 1,
$lte: 100,
},
});
const response = await queryEngine.query("How many results do you have?");
console.log(response.toString());
}
main().catch(console.error);
import fs from "node:fs/promises";
import {
Document,
OpenAI,
serviceContextFromDefaults,
VectorStoreIndex,
} from "llamaindex";
async function main() {
// Load essay from abramov.txt in Node
const path = "node_modules/llamaindex/examples/abramov.txt";
const essay = await fs.readFile(path, "utf-8");
// Create Document object with essay
const document = new Document({ text: essay, id_: path });
// Split text and create embeddings. Store them in a VectorStoreIndex
const serviceContext = serviceContextFromDefaults({
llm: new OpenAI({ model: "gpt-4" }),
});
const index = await VectorStoreIndex.fromDocuments([document], {
serviceContext,
});
// Query the index
const queryEngine = index.asQueryEngine();
const response = await queryEngine.query(
"What did the author do in college?",
);
// Output response
console.log(response.toString());
}
main().catch(console.error);
import { OpenAI } from "llamaindex";
(async () => {
const llm = new OpenAI({ model: "gpt-4-vision-preview", temperature: 0.1 });
// complete api
const response1 = await llm.complete("How are you?");
console.log(response1.message.content);
// chat api
const response2 = await llm.chat([
{ content: "Tell me a joke!", role: "user" },
]);
console.log(response2.message.content);
})();
# Simple Examples
Before running any of the examples, make sure to set your OpenAI environment variable:
```bash
export OPENAI_API_KEY="sk-..."
```
There are two ways to run the examples: using the latest published version of `llamaindex`, or using a local build.
## Using the latest published version
Make sure to call `npm install` before running these examples:
```bash
npm install
```
Then run the examples with `ts-node`, for example `npx ts-node vectorIndex.ts`.
## Using the local build
```bash
pnpm install
pnpm --filter llamaindex build
pnpm link ../packages/core
```
Then run the examples with `ts-node`, for example `pnpx ts-node vectorIndex.ts`.
@@ -3,10 +3,14 @@
"private": true,
"name": "simple",
"dependencies": {
"@notionhq/client": "^2.2.13",
"@pinecone-database/pinecone": "^1.1.2",
"commander": "^11.1.0",
"llamaindex": "latest"
},
"devDependencies": {
"@types/node": "^18"
"@types/node": "^18.18.6",
"ts-node": "^10.9.1"
},
"scripts": {
"lint": "eslint ."
@@ -123,7 +123,7 @@ importers:
specifier: ^10.9.1
version: 10.9.1(@types/node@18.18.8)(typescript@5.3.2)
 
apps/simple:
examples:
dependencies:
'@notionhq/client':
specifier: ^2.2.13
@@ -135,15 +135,15 @@ importers:
specifier: ^11.1.0
version: 11.1.0
llamaindex:
specifier: workspace:*
version: link:../../packages/core
specifier: latest
version: link:../packages/core
devDependencies:
'@types/node':
specifier: ^18.18.6
version: 18.18.7
version: 18.18.12
ts-node:
specifier: ^10.9.1
version: 10.9.1(@types/node@18.18.7)(typescript@5.3.2)
version: 10.9.1(@types/node@18.18.12)(typescript@5.3.2)
 
packages/core:
dependencies:
@@ -3808,7 +3808,7 @@ packages:
resolution: {integrity: sha512-wJpEl30QUSy2K3/Q2c2knNiZlLXJ17JnQgaIiFbN68IMJy+2TE9fXLxvV1N/cMVs2+SpteAa6PlyrUgfGdlmDg==}
engines: {node: '>=12'}
dependencies:
'@types/node-fetch': 2.6.6
'@types/node-fetch': 2.6.9
node-fetch: 2.7.0(encoding@0.1.13)
transitivePeerDependencies:
- encoding
@@ -3820,7 +3820,7 @@ packages:
dependencies:
'@edge-runtime/types': 2.2.4
'@sinclair/typebox': 0.29.6
'@types/node': 18.18.8
'@types/node': 18.18.12
ajv: 8.12.0
cross-fetch: 3.1.8(encoding@0.1.13)
encoding: 0.1.13
@@ -4441,13 +4441,6 @@ packages:
resolution: {integrity: sha512-hov8bUuiLiyFPGyFPE1lwWhmzYbirOXQNNo40+y3zow8aFVTeyn3VWL0VFFfdNddA8S4Vf0Tc062rzyNr7Paag==}
dev: true
 
/@types/node-fetch@2.6.6:
resolution: {integrity: sha512-95X8guJYhfqiuVVhRFxVQcf4hW/2bCuoPwDasMf/531STFoNoWTT7YDnWdXHEZKqAGUigmpG31r2FE70LwnzJw==}
dependencies:
'@types/node': 18.18.12
form-data: 4.0.0
dev: false
/@types/node-fetch@2.6.9:
resolution: {integrity: sha512-bQVlnMLFJ2d35DkPNjEPmd9ueO/rh5EiaZt2bhqiSarPjZIuIV6bPQVqcrEyvNo+AfTrRGVazle1tl597w3gfA==}
dependencies:
@@ -4468,16 +4461,11 @@ packages:
dependencies:
undici-types: 5.26.5
 
/@types/node@18.18.7:
resolution: {integrity: sha512-bw+lEsxis6eqJYW8Ql6+yTqkE6RuFtsQPSe5JxXbqYRFQEER5aJA9a5UH9igqDWm3X4iLHIKOHlnAXLM4mi7uQ==}
dependencies:
undici-types: 5.26.5
dev: true
/@types/node@18.18.8:
resolution: {integrity: sha512-OLGBaaK5V3VRBS1bAkMVP2/W9B+H8meUfl866OrMNQqt7wDgdpWPp5o6gmIc9pB+lIQHSq4ZL8ypeH1vPxcPaQ==}
dependencies:
undici-types: 5.26.5
dev: true
 
/@types/node@20.9.0:
resolution: {integrity: sha512-nekiGu2NDb1BcVofVcEKMIwzlx4NjHlcjhoxxKBNLtz15Y1z7MYf549DFvkHSId02Ax6kGwWntIBPC3l/JZcmw==}
@@ -4870,20 +4858,12 @@ packages:
negotiator: 0.6.3
dev: false
 
/acorn-import-assertions@1.9.0(acorn@8.10.0):
resolution: {integrity: sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==}
peerDependencies:
acorn: ^8
dependencies:
acorn: 8.10.0
/acorn-import-assertions@1.9.0(acorn@8.11.2):
resolution: {integrity: sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==}
peerDependencies:
acorn: ^8
dependencies:
acorn: 8.11.2
dev: false
 
/acorn-jsx@5.3.2(acorn@8.11.2):
resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==}
@@ -4896,11 +4876,6 @@ packages:
resolution: {integrity: sha512-FS7hV565M5l1R08MXqo8odwMTB02C2UqzB17RVgu9EyuYFBqJZ3/ZY97sQD5FewVu1UyDFc1yztUDrAwT0EypA==}
engines: {node: '>=0.4.0'}
 
/acorn@8.10.0:
resolution: {integrity: sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==}
engines: {node: '>=0.4.0'}
hasBin: true
/acorn@8.11.2:
resolution: {integrity: sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==}
engines: {node: '>=0.4.0'}
@@ -14746,37 +14721,6 @@ packages:
yn: 3.1.1
dev: true
 
/ts-node@10.9.1(@types/node@18.18.7)(typescript@5.3.2):
resolution: {integrity: sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==}
hasBin: true
peerDependencies:
'@swc/core': '>=1.2.50'
'@swc/wasm': '>=1.2.50'
'@types/node': '*'
typescript: '>=2.7'
peerDependenciesMeta:
'@swc/core':
optional: true
'@swc/wasm':
optional: true
dependencies:
'@cspotcode/source-map-support': 0.8.1
'@tsconfig/node10': 1.0.9
'@tsconfig/node12': 1.0.11
'@tsconfig/node14': 1.0.3
'@tsconfig/node16': 1.0.4
'@types/node': 18.18.7
acorn: 8.11.2
acorn-walk: 8.3.0
arg: 4.1.3
create-require: 1.1.1
diff: 4.0.2
make-error: 1.3.6
typescript: 5.3.2
v8-compile-cache-lib: 3.0.1
yn: 3.1.1
dev: true
/ts-node@10.9.1(@types/node@18.18.8)(typescript@5.3.2):
resolution: {integrity: sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==}
hasBin: true
@@ -15675,8 +15619,8 @@ packages:
'@webassemblyjs/ast': 1.11.6
'@webassemblyjs/wasm-edit': 1.11.6
'@webassemblyjs/wasm-parser': 1.11.6
acorn: 8.10.0
acorn-import-assertions: 1.9.0(acorn@8.10.0)
acorn: 8.11.2
acorn-import-assertions: 1.9.0(acorn@8.11.2)
browserslist: 4.22.1
chrome-trace-event: 1.0.3
enhanced-resolve: 5.15.0
packages:
- "apps/*"
- "packages/*"
- "examples/"