From f378170ea523d30556953f1ab6457bddc0ac33c8 Mon Sep 17 00:00:00 2001
From: Yi Ding <yi.s.ding@gmail.com>
Date: Thu, 22 Jun 2023 23:25:46 -0700
Subject: [PATCH] initial service context

---
 apps/simple/essay.ts                     |  4 ++-
 apps/simple/package.json                 |  3 +++
 packages/core/src/LLMPredictor.ts        |  6 ++---
 packages/core/src/LanguageModel.ts       |  4 +--
 packages/core/src/NodeParser.ts          |  4 ++-
 packages/core/src/ResponseSynthesizer.ts |  8 +++---
 packages/core/src/ServiceContext.ts      | 33 +++++++++++++++++++++---
 7 files changed, 47 insertions(+), 15 deletions(-)

diff --git a/apps/simple/essay.ts b/apps/simple/essay.ts
index b0969883f..b478dbbb0 100644
--- a/apps/simple/essay.ts
+++ b/apps/simple/essay.ts
@@ -1,4 +1,4 @@
-export default `What I Worked On
+const essay = `What I Worked On
 
 February 2021
 
@@ -349,3 +349,5 @@ But if so there's no reason to suppose that this is the limit of the language th
 
 Thanks to Trevor Blackwell, John Collison, Patrick Collison, Daniel Gackle, Ralph Hazell, Jessica Livingston, Robert Morris, and Harj Taggar for reading drafts of this.`;
+
+export default essay;
 
diff --git a/apps/simple/package.json b/apps/simple/package.json
index a5a22b25e..57bc9065e 100644
--- a/apps/simple/package.json
+++ b/apps/simple/package.json
@@ -5,5 +5,8 @@
   },
   "devDependencies": {
     "@types/node": "^18"
+  },
+  "scripts": {
+    "lint": "eslint ."
   }
 }
diff --git a/packages/core/src/LLMPredictor.ts b/packages/core/src/LLMPredictor.ts
index 0a7618b8d..9df10ce6b 100644
--- a/packages/core/src/LLMPredictor.ts
+++ b/packages/core/src/LLMPredictor.ts
@@ -2,11 +2,11 @@ import { ChatOpenAI } from "./LanguageModel";
 
 export interface BaseLLMPredictor {
   getLlmMetadata(): Promise<any>;
-  predict(prompt: string, options: any): Promise<string>;
+  apredict(prompt: string, options: any): Promise<string>;
   // stream(prompt: string, options: any): Promise<any>;
 }
 
-export class ChatOpenAILLMPredictor implements BaseLLMPredictor {
+export class ChatGPTLLMPredictor implements BaseLLMPredictor {
   llm: string;
   retryOnThrottling: boolean;
   languageModel: ChatOpenAI;
@@ -25,7 +25,7 @@
     throw new Error("Not implemented yet");
   }
 
-  async predict(prompt: string, options: any) {
+  async apredict(prompt: string, options: any) {
     return this.languageModel.agenerate([
       {
         content: prompt,
diff --git a/packages/core/src/LanguageModel.ts b/packages/core/src/LanguageModel.ts
index 3ca0af7cf..cf0d1307f 100644
--- a/packages/core/src/LanguageModel.ts
+++ b/packages/core/src/LanguageModel.ts
@@ -8,7 +8,7 @@ import {
 
 interface LLMResult {}
 
-interface BaseLanguageModel {}
+export interface BaseLanguageModel {}
 
 type MessageType = "human" | "ai" | "system" | "generic" | "function";
 
@@ -26,7 +26,7 @@ interface LLMResult {
   generations: Generation[][]; // Each input can have more than one generations
 }
 
-class BaseChatModel implements BaseLanguageModel {}
+export class BaseChatModel implements BaseLanguageModel {}
 
 export class ChatOpenAI extends BaseChatModel {
   model: string;
diff --git a/packages/core/src/NodeParser.ts b/packages/core/src/NodeParser.ts
index 8a13f2f3e..1ec66f6f6 100644
--- a/packages/core/src/NodeParser.ts
+++ b/packages/core/src/NodeParser.ts
@@ -29,7 +29,9 @@ export function getNodesFromDocument(
   return nodes;
 }
 
-interface NodeParser {}
+export interface NodeParser {
+  getNodesFromDocuments(documents: Document[]): Node[];
+}
 
 export class SimpleNodeParser implements NodeParser {
   textSplitter: SentenceSplitter;
diff --git a/packages/core/src/ResponseSynthesizer.ts b/packages/core/src/ResponseSynthesizer.ts
index 3b26d3a0a..b7e23f800 100644
--- a/packages/core/src/ResponseSynthesizer.ts
+++ b/packages/core/src/ResponseSynthesizer.ts
@@ -1,4 +1,4 @@
-import { ChatOpenAILLMPredictor } from "./LLMPredictor";
+import { ChatGPTLLMPredictor } from "./LLMPredictor";
 import { NodeWithScore } from "./Node";
 import { SimplePrompt, defaultTextQaPrompt } from "./Prompt";
 import { Response } from "./Response";
@@ -8,11 +8,11 @@ interface BaseResponseBuilder {
 }
 
 export class SimpleResponseBuilder {
-  llmPredictor: ChatOpenAILLMPredictor;
+  llmPredictor: ChatGPTLLMPredictor;
   textQATemplate: SimplePrompt;
 
   constructor() {
-    this.llmPredictor = new ChatOpenAILLMPredictor();
+    this.llmPredictor = new ChatGPTLLMPredictor();
     this.textQATemplate = defaultTextQaPrompt;
   }
 
@@ -23,7 +23,7 @@
     };
 
     const prompt = this.textQATemplate(input);
-    return this.llmPredictor.predict(prompt, {});
+    return this.llmPredictor.apredict(prompt, {});
   }
 }
 
diff --git a/packages/core/src/ServiceContext.ts b/packages/core/src/ServiceContext.ts
index 10fcb25b9..68b7c1fb2 100644
--- a/packages/core/src/ServiceContext.ts
+++ b/packages/core/src/ServiceContext.ts
@@ -1,8 +1,33 @@
-interface ServiceContext {
-  llmPredictor?: any;
+import { BaseEmbedding, OpenAIEmbedding } from "./Embedding";
+import { BaseLLMPredictor, ChatGPTLLMPredictor } from "./LLMPredictor";
+import { BaseLanguageModel } from "./LanguageModel";
+import { NodeParser, SimpleNodeParser } from "./NodeParser";
+
+export interface ServiceContext {
+  llmPredictor: BaseLLMPredictor;
   // promptHelper: any;
-  embedModel: any;
-  nodeParser: any;
+  embedModel: BaseEmbedding;
+  nodeParser: NodeParser;
   // llamaLogger: any;
   // callbackManager: any;
 }
+
+export interface ServiceContextOptions {
+  llmPredictor?: BaseLLMPredictor;
+  llm?: BaseLanguageModel;
+  embedModel?: BaseEmbedding;
+  nodeParser?: NodeParser;
+  // NodeParser arguments
+  chunkSize?: number;
+  chunkOverlap?: number;
+}
+
+export function serviceContextFromDefaults(options: ServiceContextOptions) {
+  const serviceContext: ServiceContext = {
+    llmPredictor: options.llmPredictor ?? new ChatGPTLLMPredictor(),
+    embedModel: options.embedModel ?? new OpenAIEmbedding(),
+    nodeParser: options.nodeParser ?? new SimpleNodeParser(),
+  };
+
+  return serviceContext;
+}
--
GitLab
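
A minimal usage sketch of the new serviceContextFromDefaults helper, showing how the pieces in this patch fit together. This is not part of the commit: the import paths are assumptions based on calling code sitting alongside the modules in packages/core/src, and it relies only on what the diff shows (ChatGPTLLMPredictor, OpenAIEmbedding, and SimpleNodeParser are all constructible with no arguments).

  import { serviceContextFromDefaults } from "./ServiceContext";
  import { SimpleNodeParser } from "./NodeParser";

  // Every field of ServiceContextOptions is optional, so an empty object
  // yields the stock defaults wired up in serviceContextFromDefaults:
  // ChatGPTLLMPredictor, OpenAIEmbedding, and SimpleNodeParser.
  const serviceContext = serviceContextFromDefaults({});

  // Overriding a single component keeps the remaining defaults intact.
  const customContext = serviceContextFromDefaults({
    nodeParser: new SimpleNodeParser(),
  });

Note that chunkSize and chunkOverlap are accepted by ServiceContextOptions but are not yet threaded into SimpleNodeParser in this commit, and the llm option is likewise unused by the helper so far.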