import {
  ContextChatEngine,
  LLM,
  serviceContextFromDefaults,
  SimpleDocumentStore,
  storageContextFromDefaults,
  VectorStoreIndex,
} from "llamaindex";
import { CHUNK_OVERLAP, CHUNK_SIZE, STORAGE_CACHE_DIR } from "./constants.mjs";

// Load the vector index persisted on disk, wiring the LLM and chunking
// settings in via a service context.
async function getDataSource(llm: LLM) {
  const serviceContext = serviceContextFromDefaults({
    llm,
    chunkSize: CHUNK_SIZE,
    chunkOverlap: CHUNK_OVERLAP,
  });
  const storageContext = await storageContextFromDefaults({
    persistDir: `${STORAGE_CACHE_DIR}`,
  });

  // Fail fast if the cache directory contains no ingested documents yet.
  const numberOfDocs = Object.keys(
    (storageContext.docStore as SimpleDocumentStore).toDict(),
  ).length;
  if (numberOfDocs === 0) {
    throw new Error(
      `StorageContext is empty - call 'npm run generate' to generate the storage first`,
    );
  }

  return await VectorStoreIndex.init({
    storageContext,
    serviceContext,
  });
}

// Build a retrieval-augmented chat engine on top of the stored index.
export async function createChatEngine(llm: LLM) {
  const index = await getDataSource(llm);
  const retriever = index.asRetriever();
  // Fetch the 5 most similar chunks as context for each user message.
  retriever.similarityTopK = 5;

  return new ContextChatEngine({
    chatModel: llm,
    retriever,
  });
}
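
For context, here is a minimal usage sketch. The file path ./chat-engine.mjs, the OpenAI model choice, and the chat(message) call signature are assumptions for illustration; llamaindex's ChatEngine signature has varied across releases (newer versions take an options object instead of a bare string).

import { OpenAI } from "llamaindex";
import { createChatEngine } from "./chat-engine.mjs"; // hypothetical path to the module above

async function main() {
  // Illustrative model choice; any llamaindex LLM implementation works here.
  const llm = new OpenAI({ model: "gpt-3.5-turbo" });
  const chatEngine = await createChatEngine(llm);

  // chat() signature assumed from this era of llamaindex; newer releases
  // expect an object such as { message, chatHistory } instead.
  const response = await chatEngine.chat("What do the stored documents say?");
  console.log(response.toString());
}

main().catch(console.error);

Because createChatEngine throws when the persisted docstore is empty, running the ingestion step (npm run generate) first is required; the error message in getDataSource points the user at exactly that command.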