diff --git a/apps/simple/essay.ts b/apps/simple/essay.ts
index b0969883f13ed8234446892b1e44e7833557cf3a..b478dbbb01ccb1ba2269ab11f928adad4d9618ff 100644
--- a/apps/simple/essay.ts
+++ b/apps/simple/essay.ts
@@ -1,4 +1,4 @@
-export default `What I Worked On
+const essay = `What I Worked On
 
 February 2021
 
@@ -349,3 +349,5 @@ But if so there's no reason to suppose that this is the limit of the language th
 
 
 Thanks to Trevor Blackwell, John Collison, Patrick Collison, Daniel Gackle, Ralph Hazell, Jessica Livingston, Robert Morris, and Harj Taggar for reading drafts of this.`;
+
+export default essay;
diff --git a/apps/simple/package.json b/apps/simple/package.json
index a5a22b25e7d1608ad5298e6c35f1dd9abd9d4bcd..57bc9065ea80dc6a1a5bd62c554ccd212f8fc5b9 100644
--- a/apps/simple/package.json
+++ b/apps/simple/package.json
@@ -5,5 +5,8 @@
   },
   "devDependencies": {
     "@types/node": "^18"
+  },
+  "scripts": {
+    "lint": "eslint ."
   }
 }
diff --git a/packages/core/src/LLMPredictor.ts b/packages/core/src/LLMPredictor.ts
index 0a7618b8d845d3752d2a3679cd85fa4d15498794..9df10ce6bc66f8f1cced27c504fae21ae63de5e9 100644
--- a/packages/core/src/LLMPredictor.ts
+++ b/packages/core/src/LLMPredictor.ts
@@ -2,11 +2,11 @@ import { ChatOpenAI } from "./LanguageModel";
 
 export interface BaseLLMPredictor {
   getLlmMetadata(): Promise<any>;
-  predict(prompt: string, options: any): Promise<string>;
+  apredict(prompt: string, options: any): Promise<string>;
   // stream(prompt: string, options: any): Promise<any>;
 }
 
-export class ChatOpenAILLMPredictor implements BaseLLMPredictor {
+export class ChatGPTLLMPredictor implements BaseLLMPredictor {
   llm: string;
   retryOnThrottling: boolean;
   languageModel: ChatOpenAI;
@@ -25,7 +25,7 @@ export class ChatOpenAILLMPredictor implements BaseLLMPredictor {
     throw new Error("Not implemented yet");
   }
 
-  async predict(prompt: string, options: any) {
+  async apredict(prompt: string, options: any) {
     return this.languageModel.agenerate([
       {
         content: prompt,
diff --git a/packages/core/src/LanguageModel.ts b/packages/core/src/LanguageModel.ts
index 3ca0af7cfbd3b31323de13b1ce33daf16dc308fb..cf0d1307f8d6ef8e9b3d591bf384b5ced3ebfa31 100644
--- a/packages/core/src/LanguageModel.ts
+++ b/packages/core/src/LanguageModel.ts
@@ -8,7 +8,7 @@ import {
 
 interface LLMResult {}
 
-interface BaseLanguageModel {}
+export interface BaseLanguageModel {}
 
 type MessageType = "human" | "ai" | "system" | "generic" | "function";
 
@@ -26,7 +26,7 @@ interface LLMResult {
   generations: Generation[][]; // Each input can have more than one generations
 }
 
-class BaseChatModel implements BaseLanguageModel {}
+export class BaseChatModel implements BaseLanguageModel {}
 
 export class ChatOpenAI extends BaseChatModel {
   model: string;
diff --git a/packages/core/src/NodeParser.ts b/packages/core/src/NodeParser.ts
index 8a13f2f3e4f78be66022e2c2f6ca69ae11228eb3..1ec66f6f643abaef7cc2c6f3d98f9dc442b3a156 100644
--- a/packages/core/src/NodeParser.ts
+++ b/packages/core/src/NodeParser.ts
@@ -29,7 +29,9 @@ export function getNodesFromDocument(
   return nodes;
 }
 
-interface NodeParser {}
+export interface NodeParser {
+  getNodesFromDocuments(documents: Document[]): Node[];
+}
 
 export class SimpleNodeParser implements NodeParser {
   textSplitter: SentenceSplitter;
diff --git a/packages/core/src/ResponseSynthesizer.ts b/packages/core/src/ResponseSynthesizer.ts
index 3b26d3a0abb1f2eef5fcf7df2746eb01a2b35d6b..b7e23f800d2dc1d95afdc1adb045d2de336b78cf 100644
--- a/packages/core/src/ResponseSynthesizer.ts
+++ b/packages/core/src/ResponseSynthesizer.ts
@@ -1,4 +1,4 @@
-import { ChatOpenAILLMPredictor } from "./LLMPredictor";
+import { ChatGPTLLMPredictor } from "./LLMPredictor";
 import { NodeWithScore } from "./Node";
 import { SimplePrompt, defaultTextQaPrompt } from "./Prompt";
 import { Response } from "./Response";
@@ -8,11 +8,11 @@ interface BaseResponseBuilder {
 }
 
 export class SimpleResponseBuilder {
-  llmPredictor: ChatOpenAILLMPredictor;
+  llmPredictor: ChatGPTLLMPredictor;
   textQATemplate: SimplePrompt;
 
   constructor() {
-    this.llmPredictor = new ChatOpenAILLMPredictor();
+    this.llmPredictor = new ChatGPTLLMPredictor();
     this.textQATemplate = defaultTextQaPrompt;
   }
 
@@ -23,7 +23,7 @@ export class SimpleResponseBuilder {
     };
 
     const prompt = this.textQATemplate(input);
-    return this.llmPredictor.predict(prompt, {});
+    return this.llmPredictor.apredict(prompt, {});
   }
 }
diff --git a/packages/core/src/ServiceContext.ts b/packages/core/src/ServiceContext.ts
index 10fcb25b958bc80d5f6d87318656f7f0572a2034..68b7c1fb293bbdcfafd9038167ea98ee29e513b4 100644
--- a/packages/core/src/ServiceContext.ts
+++ b/packages/core/src/ServiceContext.ts
@@ -1,8 +1,33 @@
-interface ServiceContext {
-  llmPredictor?: any;
+import { BaseEmbedding, OpenAIEmbedding } from "./Embedding";
+import { BaseLLMPredictor, ChatGPTLLMPredictor } from "./LLMPredictor";
+import { BaseLanguageModel } from "./LanguageModel";
+import { NodeParser, SimpleNodeParser } from "./NodeParser";
+
+export interface ServiceContext {
+  llmPredictor: BaseLLMPredictor;
   // promptHelper: any;
-  embedModel: any;
-  nodeParser: any;
+  embedModel: BaseEmbedding;
+  nodeParser: NodeParser;
   // llamaLogger: any;
   // callbackManager: any;
 }
+
+export interface ServiceContextOptions {
+  llmPredictor?: BaseLLMPredictor;
+  llm?: BaseLanguageModel;
+  embedModel?: BaseEmbedding;
+  nodeParser?: NodeParser;
+  // NodeParser arguments
+  chunkSize?: number;
+  chunkOverlap?: number;
+}
+
+export function serviceContextFromDefaults(options: ServiceContextOptions) {
+  const serviceContext: ServiceContext = {
+    llmPredictor: options.llmPredictor ?? new ChatGPTLLMPredictor(),
+    embedModel: options.embedModel ?? new OpenAIEmbedding(),
+    nodeParser: options.nodeParser ?? new SimpleNodeParser(),
+  };
+
+  return serviceContext;
+}
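
For reviewers, a minimal usage sketch (not part of the diff) of how the renamed predictor and the new defaults helper fit together. It assumes the relative import paths within packages/core/src shown above and the zero-argument constructors this change already uses:

// sketch.ts - illustration only
import { serviceContextFromDefaults } from "./ServiceContext";

// Every field of ServiceContextOptions is optional, so an empty object
// falls back to the defaults wired up in this diff: ChatGPTLLMPredictor,
// OpenAIEmbedding, and SimpleNodeParser.
const serviceContext = serviceContextFromDefaults({});

// The predictor method is now apredict (renamed from predict).
serviceContext.llmPredictor
  .apredict("Summarize the essay in one sentence.", {})
  .then((answer) => console.log(answer));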