Unverified commit daf8522b authored by Thuc Pham, committed by GitHub

feat: use mock llm (#1492)

parent 223f3136
@@ -6,6 +6,7 @@ const withMDX = createMDX();
 const config = {
   reactStrictMode: true,
   transpilePackages: ["monaco-editor"],
+  serverExternalPackages: ["@huggingface/transformers"],
   webpack: (config, { isServer }) => {
     if (Array.isArray(config.target) && config.target.includes("web")) {
       config.target = ["web", "es2020"];
@@ -26,6 +27,7 @@ const config = {
       }),
     );
   }
+  config.resolve.alias["replicate"] = false;
   return config;
 },
};

-import { Message } from "ai";
-import { simulateReadableStream } from "ai/test";
-import { NextRequest, NextResponse } from "next/server";
+import { llm } from "@/lib/utils";
+import { LlamaIndexAdapter, type Message } from "ai";
+import { Settings, SimpleChatEngine, type ChatMessage } from "llamaindex";
+import { NextResponse, type NextRequest } from "next/server";
 
+Settings.llm = llm;
+
 export async function POST(request: NextRequest) {
   try {
@@ -12,19 +15,16 @@ export async function POST(request: NextRequest) {
         { status: 400 },
       );
     }
-    const mockResponse = `Hello! This is a mock response to: ${userMessage.content}`;
-    return new Response(
-      simulateReadableStream({
-        chunkDelayInMs: 20,
-        values: mockResponse.split(" ").map((t) => `0:"${t} "\n`),
-      }).pipeThrough(new TextEncoderStream()),
-      {
-        status: 200,
-        headers: {
-          "X-Vercel-AI-Data-Stream": "v1",
-          "Content-Type": "text/plain; charset=utf-8",
-        },
-      },
+
+    const chatEngine = new SimpleChatEngine();
+
+    return LlamaIndexAdapter.toDataStreamResponse(
+      await chatEngine.chat({
+        message: userMessage.content,
+        chatHistory: messages as ChatMessage[],
+        stream: true,
+      }),
+      {},
     );
   } catch (error) {
     const detail = (error as Error).message;
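For context, a minimal client-side smoke test of the rewritten route. The `/api/chat` path and the `{ messages }` request body shape are assumptions inferred from the `messages as ChatMessage[]` usage above; they are not part of this commit. The `0:"..."` line framing matches the AI SDK data-stream protocol that the removed simulateReadableStream mock also produced.

// Hypothetical client-side smoke test (path and body shape are assumptions).
async function askChatRoute(prompt: string): Promise<string> {
  const res = await fetch("/api/chat", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ messages: [{ role: "user", content: prompt }] }),
  });
  if (!res.ok || !res.body) throw new Error(`chat route failed: ${res.status}`);

  // LlamaIndexAdapter.toDataStreamResponse emits data-stream lines such as 0:"Hello ".
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  let text = "";
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split("\n");
    buffer = lines.pop() ?? ""; // keep any partial line for the next read
    for (const line of lines) {
      if (line.startsWith('0:')) text += JSON.parse(line.slice(2));
    }
  }
  return text;
}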
+import { llm } from "@/lib/utils";
 import { Markdown } from "@llamaindex/chat-ui/widgets";
-import { generateId, Message, parseStreamPart } from "ai";
+import { generateId, Message } from "ai";
 import { createAI, createStreamableUI, getMutableAIState } from "ai/rsc";
-import { simulateReadableStream } from "ai/test";
+import { type ChatMessage, Settings, SimpleChatEngine } from "llamaindex";
 import { ReactNode } from "react";
 
 type ServerState = Message[];
@@ -10,6 +11,8 @@ type Actions = {
   chat: (message: Message) => Promise<Message & { display: ReactNode }>;
 };
 
+Settings.llm = llm;
+
 export const AI = createAI<ServerState, FrontendState, Actions>({
   initialAIState: [],
   initialUIState: [],
@@ -20,31 +23,30 @@ export const AI = createAI<ServerState, FrontendState, Actions>({
     const aiState = getMutableAIState<typeof AI>();
     aiState.update((prev) => [...prev, message]);
 
-    const mockResponse = `Hello! This is a mock response to: ${message.content}`;
-    const responseStream = simulateReadableStream({
-      chunkDelayInMs: 20,
-      values: mockResponse.split(" ").map((t) => `0:"${t} "\n`),
-    });
-
     const uiStream = createStreamableUI();
+    const chatEngine = new SimpleChatEngine();
     const assistantMessage: Message = {
       id: generateId(),
       role: "assistant",
      content: "",
     };
 
-    responseStream.pipeTo(
-      new WritableStream({
-        write: async (message) => {
-          assistantMessage.content += parseStreamPart(message).value;
-          uiStream.update(<Markdown content={assistantMessage.content} />);
-        },
-        close: () => {
-          aiState.done([...aiState.get(), assistantMessage]);
-          uiStream.done();
-        },
-      }),
-    );
+    // run the async function without blocking
+    (async () => {
+      const chatResponse = await chatEngine.chat({
+        stream: true,
+        message: message.content,
+        chatHistory: aiState.get() as ChatMessage[],
+      });
+
+      for await (const chunk of chatResponse) {
+        assistantMessage.content += chunk.delta;
+        uiStream.update(<Markdown content={assistantMessage.content} />);
+      }
+
+      aiState.done([...aiState.get(), assistantMessage]);
+      uiStream.done();
+    })();
 
     return {
       ...assistantMessage,
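The notable change in this file is that the WritableStream pipe is replaced by an async IIFE: the action returns immediately while the IIFE keeps pushing chunks into the createStreamableUI handle. A minimal standalone sketch of that pattern, with an illustrative function name and fake chunks standing in for the chat engine (not part of this commit):

// Hypothetical sketch of the fire-and-forget streaming pattern used above.
import { createStreamableUI } from "ai/rsc";

export async function streamGreeting() {
  const uiStream = createStreamableUI(<p>…</p>);

  // Kick off the slow work without awaiting it, so the action can return right away.
  (async () => {
    let text = "";
    for (const word of ["Hello,", " streamed", " world"]) {
      await new Promise((resolve) => setTimeout(resolve, 200)); // stand-in for LLM latency
      text += word;
      uiStream.update(<p>{text}</p>);
    }
    uiStream.done(<p>{text}</p>); // seal the stream once all chunks are in
  })();

  return uiStream.value; // the client renders this and sees updates as they arrive
}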
-import { clsx, type ClassValue } from "clsx"
-import { twMerge } from "tailwind-merge"
+import { clsx, type ClassValue } from "clsx";
+import { LLM, LLMMetadata } from "llamaindex";
+import { twMerge } from "tailwind-merge";
 
 export function cn(...inputs: ClassValue[]) {
   return twMerge(clsx(inputs))
 }
+
+class MockLLM {
+  metadata: LLMMetadata = {
+    model: "MockLLM",
+    temperature: 0.5,
+    topP: 0.5,
+    contextWindow: 1024,
+    tokenizer: undefined,
+  };
+
+  chat() {
+    const mockResponse = "Hello! This is a mock response";
+    return Promise.resolve(
+      new ReadableStream({
+        async start(controller) {
+          for (const char of mockResponse) {
+            controller.enqueue({ delta: char });
+            await new Promise((resolve) => setTimeout(resolve, 20));
+          }
+          controller.close();
+        },
+      }),
+    );
+  }
+}
+
+export const llm = new MockLLM() as unknown as LLM;
\ No newline at end of file
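The MockLLM above streams one character of its canned response every 20 ms, using the same { delta } chunk shape the chat engines iterate over, and is registered globally via Settings.llm. A minimal sketch of exercising it directly, using only calls that appear in this diff; the script itself, its import path, and the message are assumptions, not part of the commit:

// Hypothetical smoke test for the mock LLM (not part of this commit).
import { Settings, SimpleChatEngine } from "llamaindex";
import { llm } from "@/lib/utils";

Settings.llm = llm; // same global registration the API route and RSC action perform

const engine = new SimpleChatEngine();
const stream = await engine.chat({ message: "ping", stream: true });

for await (const chunk of stream) {
  // Prints "Hello! This is a mock response", roughly one character every 20 ms.
  process.stdout.write(chunk.delta);
}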