diff --git a/.changeset/fair-penguins-trade.md b/.changeset/fair-penguins-trade.md
new file mode 100644
index 0000000000000000000000000000000000000000..54ca229a1505d2773c0c2cbc353c47427822cdad
--- /dev/null
+++ b/.changeset/fair-penguins-trade.md
@@ -0,0 +1,6 @@
+---
+"llamaindex": patch
+"@llamaindex/edge": patch
+---
+
+fix: build error on Next.js Node.js runtime
diff --git a/packages/core/e2e/examples/nextjs-agent/src/app/test/page.tsx b/packages/core/e2e/examples/nextjs-agent/src/app/test/page.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..e1540bfa1601271594444172f6dadd6e12979827
--- /dev/null
+++ b/packages/core/e2e/examples/nextjs-agent/src/app/test/page.tsx
@@ -0,0 +1,5 @@
+import "llamaindex";
+
+export default function Page() {
+  return "hello world!";
+}
diff --git a/packages/core/src/embeddings/ClipEmbedding.ts b/packages/core/src/embeddings/ClipEmbedding.ts
index 0dab86411ab65aeb8e49f9537d644d05ef7a5c3d..5cd6f1bc39abd1b892d084fd4009180141cffbfc 100644
--- a/packages/core/src/embeddings/ClipEmbedding.ts
+++ b/packages/core/src/embeddings/ClipEmbedding.ts
@@ -3,7 +3,10 @@ import type { ImageType } from "../Node.js";
 import { MultiModalEmbedding } from "./MultiModalEmbedding.js";
 
 async function readImage(input: ImageType) {
-  const { RawImage } = await import("@xenova/transformers");
+  const { RawImage } = await import(
+    /* webpackIgnore: true */
+    "@xenova/transformers"
+  );
   if (input instanceof Blob) {
     return await RawImage.fromBlob(input);
   } else if (_.isString(input) || input instanceof URL) {
@@ -29,7 +32,10 @@ export class ClipEmbedding extends MultiModalEmbedding {
 
   async getTokenizer() {
     if (!this.tokenizer) {
-      const { AutoTokenizer } = await import("@xenova/transformers");
+      const { AutoTokenizer } = await import(
+        /* webpackIgnore: true */
+        "@xenova/transformers"
+      );
       this.tokenizer = await AutoTokenizer.from_pretrained(this.modelType);
     }
     return this.tokenizer;
@@ -37,7 +43,10 @@ export class ClipEmbedding extends MultiModalEmbedding {
 
   async getProcessor() {
     if (!this.processor) {
-      const { AutoProcessor } = await import("@xenova/transformers");
+      const { AutoProcessor } = await import(
+        /* webpackIgnore: true */
+        "@xenova/transformers"
+      );
       this.processor = await AutoProcessor.from_pretrained(this.modelType);
     }
     return this.processor;
@@ -46,6 +55,7 @@ export class ClipEmbedding extends MultiModalEmbedding {
   async getVisionModel() {
     if (!this.visionModel) {
       const { CLIPVisionModelWithProjection } = await import(
+        /* webpackIgnore: true */
         "@xenova/transformers"
       );
       this.visionModel = await CLIPVisionModelWithProjection.from_pretrained(
@@ -59,6 +69,7 @@ export class ClipEmbedding extends MultiModalEmbedding {
   async getTextModel() {
     if (!this.textModel) {
       const { CLIPTextModelWithProjection } = await import(
+        /* webpackIgnore: true */
         "@xenova/transformers"
       );
       this.textModel = await CLIPTextModelWithProjection.from_pretrained(
diff --git a/packages/core/src/objects/base.ts b/packages/core/src/objects/base.ts
index 3eade2994c735a1188172a2b5c31e542deae2aaa..b9b529a0ba570b7692ef651f9a540cb3e4e01a70 100644
--- a/packages/core/src/objects/base.ts
+++ b/packages/core/src/objects/base.ts
@@ -1,7 +1,7 @@
 import type { BaseNode, Metadata } from "../Node.js";
 import { TextNode } from "../Node.js";
 import type { BaseRetriever } from "../Retriever.js";
-import type { VectorStoreIndex } from "../indices/index.js";
+import type { VectorStoreIndex } from "../indices/vectorStore/index.js";
 import type { MessageContent } from "../llm/index.js";
 import { extractText } from "../llm/utils.js";
 import type { BaseTool } from "../types.js";