Skip to content
Snippets Groups Projects
Unverified Commit 83ebdfb1 authored by Alex Yang's avatar Alex Yang Committed by GitHub
Browse files

fix: next.js binding (#997)

parent 835b1ac0
No related branches found
Tags llamaindex@0.0.9
No related merge requests found
---
"@llamaindex/next-node-runtime-test": patch
"llamaindex": patch
---
fix: next.js build error
"use server";
import {
OpenAI,
OpenAIAgent,
QueryEngineTool,
Settings,
VectorStoreIndex,
} from "llamaindex";
import { HuggingFaceEmbedding } from "llamaindex/embeddings/HuggingFaceEmbedding";
import { SimpleDirectoryReader } from "llamaindex/readers/SimpleDirectoryReader";
// Configure the shared llamaindex `Settings` singleton for this server module.
// LLM: OpenAI gpt-4o; falls back to a dummy key so the module can be loaded
// in CI/tests without a real credential (actual API calls would still fail).
// NOTE(review): NEXT_PUBLIC_* env vars are inlined into the client bundle by
// Next.js — putting an OpenAI key there may expose it; confirm this is intended.
Settings.llm = new OpenAI({
  // eslint-disable-next-line turbo/no-undeclared-env-vars
  apiKey: process.env.NEXT_PUBLIC_OPENAI_KEY ?? "FAKE_KEY_TO_PASS_TESTS",
  model: "gpt-4o",
});
// Embeddings are computed locally via a HuggingFace model rather than an API.
// quantized: false — presumably full-precision weights are wanted for
// retrieval quality; TODO confirm.
Settings.embedModel = new HuggingFaceEmbedding({
  modelType: "BAAI/bge-small-en-v1.5",
  quantized: false,
});
// Log every agent tool invocation and its result for debugging/observability.
Settings.callbackManager.on("llm-tool-call", (event) => {
  console.log(event.detail.payload);
});
Settings.callbackManager.on("llm-tool-result", (event) => {
  console.log(event.detail.payload);
});
/**
 * Server action: answers `query` with an OpenAI agent that can consult a
 * vector index built from every document in this module's directory.
 *
 * @param query - the user's natural-language question
 * @returns `{ message }` with the agent's response on success, or
 *          `{ errors }` with a generic error string on failure
 */
export async function getOpenAIModelRequest(query: string) {
  try {
    // Load and index the documents that live next to this file.
    const currentDir = __dirname;
    const documents = await new SimpleDirectoryReader().loadData(currentDir);
    const index = await VectorStoreIndex.fromDocuments(documents);

    // Query engine backed by a top-10 similarity retriever.
    const queryEngine = index.asQueryEngine({
      retriever: index.asRetriever({ similarityTopK: 10 }),
    });

    // Expose the query engine to the agent as a single callable tool.
    const deploymentTool = new QueryEngineTool({
      queryEngine: queryEngine,
      metadata: {
        name: "deployment_details_per_env",
        description: `This tool can answer detailed questions about deployments happened in various environments.`,
      },
    });

    const agent = new OpenAIAgent({ tools: [deploymentTool] });
    const { response } = await agent.chat({ message: query });
    return { message: response };
  } catch (err) {
    // Log server-side; return an opaque error to the caller.
    console.error(err);
    return { errors: "Error Calling OpenAI Model" };
  }
}
import { getOpenAIModelRequest } from "@/actions/openai";
import { NextRequest, NextResponse } from "next/server";
// POST /api/openai — forwards the JSON body's `query` to the agent action.
export async function POST(request: NextRequest) {
  const payload = await request.json();
  const result = await getOpenAIModelRequest(payload.query);
  return NextResponse.json(result, { status: 200 });
}
......@@ -16,23 +16,23 @@
* @module
*/
/**
 * Wraps a Next.js config so llamaindex (and its `@xenova/transformers`
 * dependency) can run in server components.
 *
 * - Marks `@xenova/transformers` as a server-components external package
 *   (idempotently, so calling the wrapper twice does not duplicate it).
 * - Installs a webpack hook that aliases node-only / optional packages to
 *   `false` so they are ignored when bundling for the browser.
 *
 * @param config - the user's Next.js config object (mutated in place)
 * @returns the same config object with the llamaindex tweaks applied
 */
export default function withLlamaIndex(config: any) {
  config.experimental = config.experimental ?? {};
  config.experimental.serverComponentsExternalPackages =
    config.experimental.serverComponentsExternalPackages ?? [];
  // Idempotent: avoid pushing a duplicate entry if the wrapper runs twice.
  if (
    !config.experimental.serverComponentsExternalPackages.includes(
      "@xenova/transformers",
    )
  ) {
    config.experimental.serverComponentsExternalPackages.push(
      "@xenova/transformers",
    );
  }
  const userWebpack = config.webpack;
  //#region hack for `@xenova/transformers`
  // Ignore node-specific modules when bundling for the browser
  // See https://webpack.js.org/configuration/resolve/#resolvealias
  config.webpack = function (webpackConfig: any, ...rest: any[]) {
    if (userWebpack) {
      // Bug fix: forward the Next.js webpack context ({ buildId, dev,
      // isServer, … }) to the user's own webpack function instead of
      // silently dropping every argument after the config.
      webpackConfig = userWebpack(webpackConfig, ...rest);
    }
    // Guard against a missing `resolve` object so the alias spread below
    // cannot throw on a minimal config.
    webpackConfig.resolve = webpackConfig.resolve ?? {};
    webpackConfig.resolve.alias = {
      ...webpackConfig.resolve.alias,
      sharp$: false,
      "onnxruntime-node$": false,
      "@google-cloud/vertexai": false,
      "groq-sdk": false,
    };
    return webpackConfig;
  };
  //#endregion
  return config;
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment