Commit f74dea5f authored by Thuc Pham, committed by GitHub

feat(express): support showing image on chat message express backend (#380)

parent ee3eb7d8
---
"create-llama": patch
---
feat: support showing image on chat message
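
The controller hunk below reads an optional `data?.imageUrl` field off the incoming chat request. As a hedged sketch of the kind of request the updated backend expects — the exact body shape isn't part of this diff, so the `messages` field and endpoint path here are assumptions:

// Hypothetical client request; only `data.imageUrl` is confirmed by this diff.
await fetch("/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    messages: [{ role: "user", content: "What is in this picture?" }],
    data: { imageUrl: "https://example.com/photo.png" },
  }),
});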
@@ -35,7 +35,7 @@ export const chat = async (req: Request, res: Response) => {
     }

     const llm = new OpenAI({
-      model: process.env.MODEL || "gpt-3.5-turbo",
+      model: (process.env.MODEL as any) || "gpt-3.5-turbo",
     });

     const chatEngine = await createChatEngine(llm);
@@ -54,9 +54,24 @@ export const chat = async (req: Request, res: Response) => {
     });

     // Return a stream, which can be consumed by the Vercel/AI client
-    const stream = LlamaIndexStream(response);
+    const { stream, data: streamData } = LlamaIndexStream(response, {
+      parserOptions: {
+        image_url: data?.imageUrl,
+      },
+    });

-    streamToResponse(stream, res);
+    // Pipe LlamaIndexStream to response
+    const processedStream = stream.pipeThrough(streamData.stream);
+
+    return streamToResponse(processedStream, res, {
+      headers: {
+        // response MUST have the `X-Experimental-Stream-Data: 'true'` header
+        // so that the client uses the correct parsing logic, see
+        // https://sdk.vercel.ai/docs/api-reference/stream-data#on-the-server
+        "X-Experimental-Stream-Data": "true",
+        "Content-Type": "text/plain; charset=utf-8",
+        "Access-Control-Expose-Headers": "X-Experimental-Stream-Data",
+      },
+    });
   } catch (error) {
     console.error("[LlamaIndex]", error);
     return res.status(500).json({
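
On the client, the Vercel AI SDK only parses these extra data frames when the experimental header above is present. A minimal sketch, assuming the `ai` 2.x React hook (the client side is not part of this diff): `useChat` then exposes the appended values through its `data` field, alongside the streamed `messages`.

import { useChat } from "ai/react";

export function Chat() {
  // `data` carries the experimental_StreamData payloads appended on the server;
  // it is only populated when X-Experimental-Stream-Data: 'true' is set.
  const { messages, data } = useChat({ api: "/api/chat" });
  return null; // render messages/data as needed
}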
...
 import {
+  JSONValue,
   createCallbacksTransformer,
   createStreamDataTransformer,
+  experimental_StreamData,
   trimStartOfStreamHelper,
   type AIStreamCallbacksAndOptions,
 } from "ai";
 import { Response } from "llamaindex";

-function createParser(res: AsyncIterable<Response>) {
+type ParserOptions = {
+  image_url?: string;
+};
+
+function createParser(
+  res: AsyncIterable<Response>,
+  data: experimental_StreamData,
+  opts?: ParserOptions,
+) {
   const it = res[Symbol.asyncIterator]();
   const trimStartOfStream = trimStartOfStreamHelper();
   return new ReadableStream<string>({
+    start() {
+      // if image_url is provided, send it via the data stream
+      if (opts?.image_url) {
+        const message: JSONValue = {
+          type: "image_url",
+          image_url: {
+            url: opts.image_url,
+          },
+        };
+        data.append(message);
+      } else {
+        data.append({}); // send an empty image response for the user's message
+      }
+    },
     async pull(controller): Promise<void> {
       const { value, done } = await it.next();
       if (done) {
         controller.close();
+        data.append({}); // send an empty image response for the assistant's message
+        data.close();
         return;
       }
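
The empty `{}` appends keep the data stream aligned at one entry per message (user, then assistant), which suggests the frontend matches images to messages by index. A hypothetical pairing helper illustrating that assumption — nothing in this diff confirms the client does exactly this:

import type { JSONValue } from "ai";

// Hypothetical: data[i] belongs to messages[i]; each entry is either an
// { type: "image_url", image_url: { url } } object or an empty {} placeholder.
function imageForMessage(data: JSONValue[], i: number): string | undefined {
  const entry = data[i];
  if (entry && typeof entry === "object" && "image_url" in entry) {
    return (entry as { image_url: { url: string } }).image_url.url;
  }
  return undefined;
}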
@@ -27,11 +53,16 @@ function createParser(res: AsyncIterable<Response>) {
 export function LlamaIndexStream(
   res: AsyncIterable<Response>,
-  callbacks?: AIStreamCallbacksAndOptions,
-): ReadableStream {
-  return createParser(res)
-    .pipeThrough(createCallbacksTransformer(callbacks))
-    .pipeThrough(
-      createStreamDataTransformer(callbacks?.experimental_streamData),
-    );
+  opts?: {
+    callbacks?: AIStreamCallbacksAndOptions;
+    parserOptions?: ParserOptions;
+  },
+): { stream: ReadableStream; data: experimental_StreamData } {
+  const data = new experimental_StreamData();
+  return {
+    stream: createParser(res, data, opts?.parserOptions)
+      .pipeThrough(createCallbacksTransformer(opts?.callbacks))
+      .pipeThrough(createStreamDataTransformer(true)),
+    data,
+  };
 }
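
The new signature bundles callbacks and parser options into a single `opts` object and always enables the stream-data transformer. A hedged usage sketch exercising the `callbacks` slot, which the controller hunk above doesn't show; `onCompletion` is part of the `ai` package's `AIStreamCallbacksAndOptions`, while `response` and `imageUrl` are assumed to come from the surrounding handler:

// Sketch: `response` is the AsyncIterable<Response> returned by chatEngine.chat().
const { stream, data } = LlamaIndexStream(response, {
  callbacks: {
    onCompletion: (completion) => console.log("completed:", completion.length),
  },
  parserOptions: { image_url: imageUrl }, // optional; omit for text-only chats
});
// Merge the data stream before responding, as the controller above does:
// streamToResponse(stream.pipeThrough(data.stream), res, { headers: /* see above */ });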