diff --git a/.changeset/tame-chairs-shake.md b/.changeset/tame-chairs-shake.md
new file mode 100644
index 0000000000000000000000000000000000000000..7e9e02b1de18741efa78c049a8be812c1f9acbd5
--- /dev/null
+++ b/.changeset/tame-chairs-shake.md
@@ -0,0 +1,9 @@
+---
+"llamaindex": patch
+"@llamaindex/deepseek": patch
+"@llamaindex/fireworks": patch
+"@llamaindex/together": patch
+"@llamaindex/jinaai": patch
+---
+
+fix: prefer using embedding model from vector store
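In code terms, `VectorStoreIndex` now resolves the embedding model as `vectorStore.embedModel ?? this.embedModel` instead of the reverse, so a model attached to a vector store is no longer shadowed by the index-level default (see the change in `packages/llamaindex/src/indices/vectorStore/index.ts` below). The remaining changes move the DeepSeek, Fireworks, Together, and JinaAI adapters out of `llamaindex` into their own provider packages.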
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/jinaai.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/jinaai.mdx
index 6a308ec0d62673312500a073fae02f7a8a0eff62..0af1e1af282ab51bb875a1cc8389186f2de93ffd 100644
--- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/jinaai.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/jinaai.mdx
@@ -2,10 +2,11 @@
 title: Jina AI
 ---
 
-To use Jina AI embeddings, you need to import `JinaAIEmbedding` from `llamaindex`.
+To use Jina AI embeddings, you need to import `JinaAIEmbedding` from `@llamaindex/jinaai`.
 
 ```ts
-import { JinaAIEmbedding, Settings } from "llamaindex";
+import { Settings } from "llamaindex";
+import { JinaAIEmbedding } from "@llamaindex/jinaai";
 
 Settings.embedModel = new JinaAIEmbedding();
 
diff --git a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/together.mdx b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/together.mdx
index 7d5a6a832e2117dc6296cde237c20ad971163edb..30c83e0fa1e13e4164823d4cd3d70915a5e496ad 100644
--- a/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/together.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/embeddings/available_embeddings/together.mdx
@@ -2,10 +2,11 @@
 title: Together
 ---
 
-To use together embeddings, you need to import `TogetherEmbedding` from `llamaindex`.
+To use Together embeddings, you need to import `TogetherEmbedding` from `@llamaindex/together`.
 
 ```ts
-import { TogetherEmbedding, Settings } from "llamaindex";
+import { Settings } from "llamaindex";
+import { TogetherEmbedding } from "@llamaindex/together";
 
 Settings.embedModel = new TogetherEmbedding({
   apiKey: "<YOUR_API_KEY>",
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepseek.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepseek.mdx
index 928164ec87a89cc87b54d8b9b1b7ec43230d327e..e197eff56a2e7d95eecb5ccf8d0429a3510a26ba 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepseek.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/deepseek.mdx
@@ -7,7 +7,8 @@ title: DeepSeek LLM
 ## Usage
 
 ```ts
-import { DeepSeekLLM, Settings } from "llamaindex";
+import { Settings } from "llamaindex";
+import { DeepSeekLLM } from "@llamaindex/deepseek";
 
 Settings.llm = new DeepSeekLLM({
   apiKey: "<YOUR_API_KEY>",
@@ -18,7 +19,8 @@ Settings.llm = new DeepSeekLLM({
 ## Example
 
 ```ts
-import { DeepSeekLLM, Document, VectorStoreIndex, Settings } from "llamaindex";
+import { Document, VectorStoreIndex, Settings } from "llamaindex";
+import { DeepSeekLLM } from "@llamaindex/deepseek";
 
 const deepseekLlm = new DeepSeekLLM({
   apiKey: "<YOUR_API_KEY>",
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/fireworks.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/fireworks.mdx
index 1680de683d157ec272e79d84aae5e87cd14176c9..328ba4e24867cc6e360f318ad50a540ef1e0f757 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/fireworks.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/fireworks.mdx
@@ -7,7 +7,8 @@ title: Fireworks LLM
 ## Usage
 
 ```ts
-import { FireworksLLM, Settings } from "llamaindex";
+import { Settings } from "llamaindex";
+import { FireworksLLM } from "@llamaindex/fireworks";
 
 Settings.llm = new FireworksLLM({
   apiKey: "<YOUR_API_KEY>",
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx
index 65cc58c416341d17edb0a11cac786642c6bfb243..a9877cfa3a327394020bdd30bb50ba24bcdb2a96 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/available_llms/together.mdx
@@ -23,7 +23,8 @@ import { Tab, Tabs } from "fumadocs-ui/components/tabs";
 ## Usage
 
 ```ts
-import { Settings, TogetherLLM } from "llamaindex";
+import { Settings } from "llamaindex";
+import { TogetherLLM } from "@llamaindex/together";
 
 Settings.llm = new TogetherLLM({
   apiKey: "<YOUR_API_KEY>",
diff --git a/examples/deepseek.ts b/examples/deepseek.ts
new file mode 100644
index 0000000000000000000000000000000000000000..1a9b49444add9d7fcecc79b92b2bfc9ec5890c44
--- /dev/null
+++ b/examples/deepseek.ts
@@ -0,0 +1,46 @@
+import { DeepSeekLLM } from "@llamaindex/deepseek";
+
+// process.env.DEEPSEEK_API_KEY is required
+const deepseek = new DeepSeekLLM({
+  apiKey: process.env.DEEPSEEK_API_KEY,
+  model: "deepseek-coder", // or "deepseek-chat"
+});
+
+(async () => {
+  // Example of non-streaming chat
+  const response = await deepseek.chat({
+    messages: [
+      {
+        role: "system",
+        content: "You are an AI assistant",
+      },
+      {
+        role: "user",
+        content: "Tell me about San Francisco",
+      },
+    ],
+    stream: false,
+  });
+  console.log("Response from DeepSeek AI:");
+  console.log(response);
+
+  // Example of streaming chat
+  const generator = await deepseek.chat({
+    messages: [
+      {
+        role: "system",
+        content: "You are an AI assistant",
+      },
+      {
+        role: "user",
+        content: "Write a short poem about San Francisco",
+      },
+    ],
+    stream: true,
+  });
+  console.log("\nStreaming response from DeepSeek AI...");
+  for await (const message of generator) {
+    process.stdout.write(message.delta);
+  }
+  console.log("\n");
+})();
diff --git a/examples/multimodal/jina.ts b/examples/multimodal/jina.ts
index 1c3081d0fcba4f4b51556198f503660913d5c051..e5adc1042bad7541322b14ea7e0f342358d180ee 100644
--- a/examples/multimodal/jina.ts
+++ b/examples/multimodal/jina.ts
@@ -1,10 +1,6 @@
+import { JinaAIEmbedding } from "@llamaindex/jinaai";
 import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
-import {
-  ImageDocument,
-  JinaAIEmbedding,
-  similarity,
-  SimilarityType,
-} from "llamaindex";
+import { ImageDocument, similarity, SimilarityType } from "llamaindex";
 import path from "path";
 
 async function main() {
diff --git a/examples/package.json b/examples/package.json
index 16672acaf71ea2f0798643e83216542395574dd4..4e439d4609e44c73825a35dbb4d7371c49977a4d 100644
--- a/examples/package.json
+++ b/examples/package.json
@@ -44,6 +44,10 @@
     "@llamaindex/voyage-ai": "^1.0.3",
     "@llamaindex/weaviate": "^0.0.11",
     "@llamaindex/workflow": "^0.0.13",
+    "@llamaindex/deepseek": "^0.0.1",
+    "@llamaindex/fireworks": "^0.0.1",
+    "@llamaindex/together": "^0.0.1",
+    "@llamaindex/jinaai": "^0.0.1",
     "@notionhq/client": "^2.2.15",
     "@pinecone-database/pinecone": "^4.0.0",
     "@vercel/postgres": "^0.10.0",
diff --git a/examples/qdrantdb/with-gemini.ts b/examples/qdrantdb/with-gemini.ts
new file mode 100644
index 0000000000000000000000000000000000000000..0c199a55d8fd2cb9eca06e6ca0fd82424e10fa34
--- /dev/null
+++ b/examples/qdrantdb/with-gemini.ts
@@ -0,0 +1,33 @@
+import {
+  GEMINI_EMBEDDING_MODEL,
+  GeminiEmbedding,
+  GeminiSession,
+} from "@llamaindex/google";
+import { QdrantVectorStore } from "@llamaindex/qdrant";
+import {
+  Document,
+  storageContextFromDefaults,
+  VectorStoreIndex,
+} from "llamaindex";
+
+const embedding = new GeminiEmbedding({
+  model: GEMINI_EMBEDDING_MODEL.EMBEDDING_001,
+  session: new GeminiSession({
+    apiKey: process.env.GEMINI_API_KEY,
+  }),
+});
+
+async function main() {
+  const docs = [new Document({ text: "Lorem ipsum dolor sit amet" })];
+  const vectorStore = new QdrantVectorStore({
+    url: process.env.QDRANT_URL,
+    apiKey: process.env.QDRANT_API_KEY,
+    embeddingModel: embedding,
+    collectionName: "gemini_test",
+  });
+  const storageContext = await storageContextFromDefaults({ vectorStore });
+  await VectorStoreIndex.fromDocuments(docs, { storageContext });
+  console.log("Inizialized vector store successfully");
+}
+
+void main().catch((err) => console.error(err));
diff --git a/examples/qdrantdb/with-jina.ts b/examples/qdrantdb/with-jina.ts
new file mode 100644
index 0000000000000000000000000000000000000000..f51f27e4e563523a0a0ed8ccacd42a6691d5c7ed
--- /dev/null
+++ b/examples/qdrantdb/with-jina.ts
@@ -0,0 +1,27 @@
+import { JinaAIEmbedding } from "@llamaindex/jinaai";
+import { QdrantVectorStore } from "@llamaindex/qdrant";
+import {
+  Document,
+  storageContextFromDefaults,
+  VectorStoreIndex,
+} from "llamaindex";
+
+const embedding = new JinaAIEmbedding({
+  apiKey: process.env.JINAAI_API_KEY,
+  model: "jina-embeddings-v3",
+});
+
+async function main() {
+  const docs = [new Document({ text: "Lorem ipsum dolor sit amet" })];
+  const vectorStore = new QdrantVectorStore({
+    url: process.env.QDRANT_URL,
+    apiKey: process.env.QDRANT_API_KEY,
+    embeddingModel: embedding,
+    collectionName: "jina_test",
+  });
+  const storageContext = await storageContextFromDefaults({ vectorStore });
+  await VectorStoreIndex.fromDocuments(docs, { storageContext });
+  console.log("Inizialized vector store successfully");
+}
+
+void main().catch((err) => console.error(err));
diff --git a/examples/readers/src/pdf_fw.ts b/examples/readers/src/pdf_fw.ts
index cad617fa83f67013f2a89a73c2f839f289c57835..78efad1f7d33d59a98547e441008490d304dae77 100644
--- a/examples/readers/src/pdf_fw.ts
+++ b/examples/readers/src/pdf_fw.ts
@@ -1,5 +1,6 @@
+import { FireworksEmbedding, FireworksLLM } from "@llamaindex/fireworks";
 import { PDFReader } from "@llamaindex/readers/pdf";
-import { FireworksEmbedding, FireworksLLM, VectorStoreIndex } from "llamaindex";
+import { VectorStoreIndex } from "llamaindex";
 
 import { Settings } from "llamaindex";
 
diff --git a/examples/together-ai.ts b/examples/together-ai.ts
index 300627659fb8cd75dd3378b67104adde9a0e4300..3823d5998e3fc9cacb46dadd3e081c25bc76638f 100644
--- a/examples/together-ai.ts
+++ b/examples/together-ai.ts
@@ -1,4 +1,4 @@
-import { TogetherEmbedding, TogetherLLM } from "llamaindex";
+import { TogetherEmbedding, TogetherLLM } from "@llamaindex/together";
 
 // process.env.TOGETHER_API_KEY is required
 const together = new TogetherLLM({
diff --git a/examples/together-ai/vector-index.ts b/examples/together-ai/vector-index.ts
index 001c3448ec11980df64bee3a4f4cf84584c1959b..a38fbbac368e144936a37a2fd75908a5e990f0cb 100644
--- a/examples/together-ai/vector-index.ts
+++ b/examples/together-ai/vector-index.ts
@@ -1,12 +1,7 @@
 import fs from "node:fs/promises";
 
-import {
-  Document,
-  Settings,
-  TogetherEmbedding,
-  TogetherLLM,
-  VectorStoreIndex,
-} from "llamaindex";
+import { TogetherEmbedding, TogetherLLM } from "@llamaindex/together";
+import { Document, Settings, VectorStoreIndex } from "llamaindex";
 
 // Update llm to use TogetherAI
 Settings.llm = new TogetherLLM({
diff --git a/packages/llamaindex/src/embeddings/OpenAIEmbedding.ts b/packages/llamaindex/src/embeddings/OpenAIEmbedding.ts
deleted file mode 100644
index 02781efcb42bde6e5598eb279af193c85ec9158f..0000000000000000000000000000000000000000
--- a/packages/llamaindex/src/embeddings/OpenAIEmbedding.ts
+++ /dev/null
@@ -1 +0,0 @@
-export * from "@llamaindex/openai";
diff --git a/packages/llamaindex/src/embeddings/index.ts b/packages/llamaindex/src/embeddings/index.ts
deleted file mode 100644
index 5467f2addc30f04e663d869f75242615627ae667..0000000000000000000000000000000000000000
--- a/packages/llamaindex/src/embeddings/index.ts
+++ /dev/null
@@ -1,5 +0,0 @@
-export * from "@llamaindex/core/embeddings";
-export { FireworksEmbedding } from "./fireworks.js";
-export * from "./JinaAIEmbedding.js";
-export * from "./OpenAIEmbedding.js";
-export { TogetherEmbedding } from "./together.js";
diff --git a/packages/llamaindex/src/index.edge.ts b/packages/llamaindex/src/index.edge.ts
index 63c6993b3d2016148625cdbd10bdf477e6101881..3a79a2080521af7d81a55f909a21fdc55aa9619f 100644
--- a/packages/llamaindex/src/index.edge.ts
+++ b/packages/llamaindex/src/index.edge.ts
@@ -22,6 +22,7 @@ export {
 export * from "@llamaindex/core/agent";
 export * from "@llamaindex/core/chat-engine";
 export * from "@llamaindex/core/data-structs";
+export * from "@llamaindex/core/embeddings";
 export {
   CallbackManager,
   DEFAULT_BASE_URL,
@@ -65,10 +66,10 @@ export * from "@llamaindex/core/storage/doc-store";
 export * from "@llamaindex/core/storage/index-store";
 export * from "@llamaindex/core/storage/kv-store";
 export * from "@llamaindex/core/utils";
+export * from "@llamaindex/openai";
 export * from "@llamaindex/workflow/agent";
 export * from "./agent/index.js";
 export * from "./cloud/index.js";
-export * from "./embeddings/index.js";
 export * from "./engines/chat/index.js";
 export * from "./engines/query/index.js";
 export * from "./evaluation/index.js";
@@ -76,7 +77,6 @@ export * from "./extractors/index.js";
 export * from "./indices/index.js";
 export * from "./ingestion/index.js";
 export { imageToDataUrl } from "./internal/utils.js";
-export * from "./llm/index.js";
 export * from "./node-parser.js";
 export * from "./objects/index.js";
 export * from "./OutputParser.js";
diff --git a/packages/llamaindex/src/index.ts b/packages/llamaindex/src/index.ts
index ccdd367ae7286a779e1f08450de06081dfb06d9d..84a40464e6c67599d0e792084455ad18c59daeb6 100644
--- a/packages/llamaindex/src/index.ts
+++ b/packages/llamaindex/src/index.ts
@@ -1,8 +1,5 @@
 export * from "./index.edge.js";
 
-// TODO: clean up, move to jinaai package
-export { JinaAIEmbedding } from "./embeddings/JinaAIEmbedding.js";
-
 // Don't export file-system stores for non-node.js runtime on top level,
 //  as we cannot guarantee that they will work in other environments
 export * from "./storage/index.js";
diff --git a/packages/llamaindex/src/indices/vectorStore/index.ts b/packages/llamaindex/src/indices/vectorStore/index.ts
index fc03b6e19caf8427cc0bff952b8f2fc10bd6af18..92be0bcb324cf6895be256c6932a2b1d3343d9e0 100644
--- a/packages/llamaindex/src/indices/vectorStore/index.ts
+++ b/packages/llamaindex/src/indices/vectorStore/index.ts
@@ -175,7 +175,7 @@ export class VectorStoreIndex extends BaseIndex<IndexDict> {
     for (const type in nodeMap) {
       const nodes = nodeMap[type as ModalityType];
       const embedModel =
-        this.embedModel ?? this.vectorStores[type as ModalityType]?.embedModel;
+        this.vectorStores[type as ModalityType]?.embedModel ?? this.embedModel;
       if (embedModel && nodes) {
         await embedModel(nodes, {
           logProgress: options?.logProgress,
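This one-line swap is the behavioral core of the patch: a vector store constructed with its own embedding model now takes precedence over the index-level model (which typically comes from `Settings.embedModel`). A minimal sketch of the resulting behavior, modeled on the `with-jina.ts` example above; the OpenAI model names are illustrative assumptions, not part of this patch:

```ts
import { OpenAIEmbedding } from "@llamaindex/openai";
import { QdrantVectorStore } from "@llamaindex/qdrant";
import {
  Document,
  Settings,
  storageContextFromDefaults,
  VectorStoreIndex,
} from "llamaindex";

// Global default; before this patch it shadowed the store-level model.
Settings.embedModel = new OpenAIEmbedding({ model: "text-embedding-3-small" });

async function main() {
  // Store-level model; after this patch it takes precedence.
  const vectorStore = new QdrantVectorStore({
    url: process.env.QDRANT_URL,
    embeddingModel: new OpenAIEmbedding({ model: "text-embedding-3-large" }),
  });
  const storageContext = await storageContextFromDefaults({ vectorStore });

  // Documents are now embedded with the store's model, not the Settings one.
  await VectorStoreIndex.fromDocuments(
    [new Document({ text: "Lorem ipsum dolor sit amet" })],
    { storageContext },
  );
}

void main().catch(console.error);
```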
diff --git a/packages/llamaindex/src/llm/index.ts b/packages/llamaindex/src/llm/index.ts
deleted file mode 100644
index dec9f0801000378e8d6d3d7d3fc2b8ac43d05022..0000000000000000000000000000000000000000
--- a/packages/llamaindex/src/llm/index.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-export { DeepSeekLLM } from "./deepseek.js";
-export { FireworksLLM } from "./fireworks.js";
-export * from "./openai.js";
-export { TogetherLLM } from "./together.js";
diff --git a/packages/llamaindex/src/llm/openai.ts b/packages/llamaindex/src/llm/openai.ts
deleted file mode 100644
index 02781efcb42bde6e5598eb279af193c85ec9158f..0000000000000000000000000000000000000000
--- a/packages/llamaindex/src/llm/openai.ts
+++ /dev/null
@@ -1 +0,0 @@
-export * from "@llamaindex/openai";
diff --git a/packages/llamaindex/tests/MetadataExtractors.test.ts b/packages/llamaindex/tests/MetadataExtractors.test.ts
index 29bb117abbac821be314bcc2a791506e22e2b734..47f560e979ee1b01545391a2678231fa4fb42f63 100644
--- a/packages/llamaindex/tests/MetadataExtractors.test.ts
+++ b/packages/llamaindex/tests/MetadataExtractors.test.ts
@@ -1,13 +1,11 @@
 import { Document } from "@llamaindex/core/schema";
-import { Settings } from "llamaindex";
-import { OpenAIEmbedding } from "llamaindex/embeddings/index";
+import { OpenAI, OpenAIEmbedding, Settings } from "llamaindex";
 import {
   KeywordExtractor,
   QuestionsAnsweredExtractor,
   SummaryExtractor,
   TitleExtractor,
 } from "llamaindex/extractors/index";
-import { OpenAI } from "llamaindex/llm/openai";
 import { SentenceSplitter } from "llamaindex/node-parser";
 import { afterAll, beforeAll, describe, expect, test, vi } from "vitest";
 import {
diff --git a/packages/llamaindex/tests/Selectors.test.ts b/packages/llamaindex/tests/Selectors.test.ts
index 9fe1ace1760e016191e658f6f5e52631c6a32639..5eec85a5b174bf90ff79c03d8a0fc96c70fbdd25 100644
--- a/packages/llamaindex/tests/Selectors.test.ts
+++ b/packages/llamaindex/tests/Selectors.test.ts
@@ -1,7 +1,7 @@
 import { describe, expect, test } from "vitest";
 // from unittest.mock import patch
 
-import { OpenAI } from "llamaindex/llm/index";
+import { OpenAI } from "llamaindex";
 import { LLMSingleSelector } from "llamaindex/selectors/index";
 import { mocStructuredkLlmGeneration } from "./utility/mockOpenAI.js";
 
diff --git a/packages/llamaindex/tests/indices/SummaryIndex.test.ts b/packages/llamaindex/tests/indices/SummaryIndex.test.ts
index c2a523e95c266a502ece510da680a3349e83a997..287dc58388fe90673a15a46a7d551a1a3d3c4647 100644
--- a/packages/llamaindex/tests/indices/SummaryIndex.test.ts
+++ b/packages/llamaindex/tests/indices/SummaryIndex.test.ts
@@ -20,13 +20,13 @@ describe("SummaryIndex", () => {
   let storageContext: StorageContext;
 
   beforeAll(async () => {
-    storageContext = await storageContextFromDefaults({
-      persistDir: testDir,
-    });
-
     const embedModel = new OpenAIEmbedding();
     mockEmbeddingModel(embedModel);
     Settings.embedModel = embedModel;
+
+    storageContext = await storageContextFromDefaults({
+      persistDir: testDir,
+    });
   });
 
   afterAll(() => {
diff --git a/packages/llamaindex/tests/indices/VectorStoreIndex.test.ts b/packages/llamaindex/tests/indices/VectorStoreIndex.test.ts
index 61c8299a28c5b82d64d07ae9650ce172f4638c07..5a755ec711742aeffef8275251201f3fe67aed2d 100644
--- a/packages/llamaindex/tests/indices/VectorStoreIndex.test.ts
+++ b/packages/llamaindex/tests/indices/VectorStoreIndex.test.ts
@@ -9,7 +9,7 @@ import { DocStoreStrategy } from "llamaindex/ingestion/strategies/index";
 import { mkdtemp, rm } from "node:fs/promises";
 import { tmpdir } from "node:os";
 import { join } from "node:path";
-import { afterAll, beforeAll, describe, expect, test, vi } from "vitest";
+import { afterAll, beforeAll, describe, expect, it, test, vi } from "vitest";
 
 const testDir = await mkdtemp(join(tmpdir(), "test-"));
 
@@ -24,6 +24,10 @@ describe("VectorStoreIndex", () => {
   ) => Promise<Array<number>>;
 
   beforeAll(async () => {
+    const embedModel = new OpenAIEmbedding();
+    mockEmbeddingModel(embedModel);
+    Settings.embedModel = embedModel;
+
     storageContext = await mockStorageContext(testDir);
     testStrategy = async (
       strategy: DocStoreStrategy,
@@ -41,10 +45,6 @@ describe("VectorStoreIndex", () => {
       }
       return entries;
     };
-
-    const embedModel = new OpenAIEmbedding();
-    mockEmbeddingModel(embedModel);
-    Settings.embedModel = embedModel;
   });
 
   afterAll(() => {
@@ -65,3 +65,28 @@ describe("VectorStoreIndex", () => {
     await rm(testDir, { recursive: true });
   });
 });
+
+describe("[VectorStoreIndex] use embedding model", () => {
+  it("should use embedding model passed in options instead of Settings", async () => {
+    const documents = [new Document({ text: "This needs to be embedded" })];
+
+    // Create mock embedding models
+    const settingsEmbedModel = new OpenAIEmbedding();
+    const customEmbedModel = new OpenAIEmbedding();
+
+    // Mock the embedding models using the utility function
+    mockEmbeddingModel(settingsEmbedModel);
+    mockEmbeddingModel(customEmbedModel);
+
+    // Add spies to track calls
+    const settingsSpy = vi.spyOn(settingsEmbedModel, "getTextEmbeddings");
+    const customSpy = vi.spyOn(customEmbedModel, "getTextEmbeddings");
+
+    Settings.embedModel = settingsEmbedModel;
+
+    const storageContext = await mockStorageContext(testDir, customEmbedModel); // setup custom embedding model
+    await VectorStoreIndex.fromDocuments(documents, { storageContext });
+    expect(customSpy).toHaveBeenCalled();
+    expect(settingsSpy).not.toHaveBeenCalled();
+  });
+});
diff --git a/packages/llamaindex/tests/utility/mockOpenAI.ts b/packages/llamaindex/tests/utility/mockOpenAI.ts
index 102ce43b06515d121f440ca28fa7c776c5786f01..e6d90a3bfdf2577acec6117c6a3bdbd491147f93 100644
--- a/packages/llamaindex/tests/utility/mockOpenAI.ts
+++ b/packages/llamaindex/tests/utility/mockOpenAI.ts
@@ -1,8 +1,6 @@
 import type { CallbackManager } from "@llamaindex/core/global";
-import type { LLMChatParamsBase } from "llamaindex";
-import { Settings } from "llamaindex";
-import type { OpenAIEmbedding } from "llamaindex/embeddings/OpenAIEmbedding";
-import { OpenAI } from "llamaindex/llm/openai";
+import type { LLMChatParamsBase, OpenAIEmbedding } from "llamaindex";
+import { OpenAI, Settings } from "llamaindex";
 import { vi } from "vitest";
 
 export const DEFAULT_LLM_TEXT_OUTPUT = "MOCK_TOKEN_1-MOCK_TOKEN_2";
diff --git a/packages/llamaindex/tests/utility/mockStorageContext.ts b/packages/llamaindex/tests/utility/mockStorageContext.ts
index 4a8c0f8802ff61b31d2fb2c99856a6f74ecb7993..dab379d7d6ce9a3c5e1902debd489b74fe30fc47 100644
--- a/packages/llamaindex/tests/utility/mockStorageContext.ts
+++ b/packages/llamaindex/tests/utility/mockStorageContext.ts
@@ -1,14 +1,27 @@
-import { OpenAIEmbedding, storageContextFromDefaults } from "llamaindex";
+import {
+  BaseEmbedding,
+  OpenAIEmbedding,
+  storageContextFromDefaults,
+} from "llamaindex";
 
 import { mockEmbeddingModel } from "./mockOpenAI.js";
 
-export async function mockStorageContext(testDir: string) {
+export async function mockStorageContext(
+  testDir: string,
+  embeddingModel?: BaseEmbedding,
+) {
   const storageContext = await storageContextFromDefaults({
     persistDir: testDir,
   });
   for (const store of Object.values(storageContext.vectorStores)) {
-    store.embedModel = new OpenAIEmbedding();
-    mockEmbeddingModel(store.embedModel as OpenAIEmbedding);
+    if (embeddingModel) {
+      // use embeddingModel if it is passed in
+      store.embedModel = embeddingModel;
+    } else {
+      // mock an embedding model for testing
+      store.embedModel = new OpenAIEmbedding();
+      mockEmbeddingModel(store.embedModel as OpenAIEmbedding);
+    }
   }
   return storageContext;
 }
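For reference, a sketch of the two call forms the updated helper supports (`testDir` and `customEmbedModel` as defined in the tests above):

```ts
// Default form: every vector store gets a freshly mocked OpenAIEmbedding.
const defaultContext = await mockStorageContext(testDir);

// New form: every vector store uses the caller's model, which lets tests
// verify store-level precedence over Settings.embedModel.
const customContext = await mockStorageContext(testDir, customEmbedModel);
```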
diff --git a/packages/providers/deepseek/package.json b/packages/providers/deepseek/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..02d3edf3337f27b3425f099a6a8c2ca048f36638
--- /dev/null
+++ b/packages/providers/deepseek/package.json
@@ -0,0 +1,39 @@
+{
+  "name": "@llamaindex/deepseek",
+  "description": "DeepSeek Adapter for LlamaIndex",
+  "version": "0.0.1",
+  "type": "module",
+  "main": "./dist/index.cjs",
+  "module": "./dist/index.js",
+  "exports": {
+    ".": {
+      "require": {
+        "types": "./dist/index.d.cts",
+        "default": "./dist/index.cjs"
+      },
+      "import": {
+        "types": "./dist/index.d.ts",
+        "default": "./dist/index.js"
+      }
+    }
+  },
+  "files": [
+    "dist"
+  ],
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/run-llama/LlamaIndexTS.git",
+    "directory": "packages/providers/deepseek"
+  },
+  "scripts": {
+    "build": "bunchee",
+    "dev": "bunchee --watch"
+  },
+  "devDependencies": {
+    "bunchee": "6.3.4"
+  },
+  "dependencies": {
+    "@llamaindex/env": "workspace:*",
+    "@llamaindex/openai": "workspace:*"
+  }
+}
diff --git a/packages/providers/deepseek/src/index.ts b/packages/providers/deepseek/src/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..e6679f111f51c327bb3c414234979edfcf3491c1
--- /dev/null
+++ b/packages/providers/deepseek/src/index.ts
@@ -0,0 +1 @@
+export * from "./llm";
diff --git a/packages/llamaindex/src/llm/deepseek.ts b/packages/providers/deepseek/src/llm.ts
similarity index 100%
rename from packages/llamaindex/src/llm/deepseek.ts
rename to packages/providers/deepseek/src/llm.ts
diff --git a/packages/providers/deepseek/tsconfig.json b/packages/providers/deepseek/tsconfig.json
new file mode 100644
index 0000000000000000000000000000000000000000..bd8900e3ac6680d07b9e885f4121a6834bf60d68
--- /dev/null
+++ b/packages/providers/deepseek/tsconfig.json
@@ -0,0 +1,19 @@
+{
+  "extends": "../../../tsconfig.json",
+  "compilerOptions": {
+    "target": "ESNext",
+    "module": "ESNext",
+    "moduleResolution": "bundler",
+    "outDir": "./lib",
+    "tsBuildInfoFile": "./lib/.tsbuildinfo"
+  },
+  "include": ["./src"],
+  "references": [
+    {
+      "path": "../openai/tsconfig.json"
+    },
+    {
+      "path": "../../env/tsconfig.json"
+    }
+  ]
+}
diff --git a/packages/providers/fireworks/package.json b/packages/providers/fireworks/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..de5a063e6766b1222fd635e63e30c45d1761656d
--- /dev/null
+++ b/packages/providers/fireworks/package.json
@@ -0,0 +1,39 @@
+{
+  "name": "@llamaindex/fireworks",
+  "description": "Fireworks Adapter for LlamaIndex",
+  "version": "0.0.1",
+  "type": "module",
+  "main": "./dist/index.cjs",
+  "module": "./dist/index.js",
+  "exports": {
+    ".": {
+      "require": {
+        "types": "./dist/index.d.cts",
+        "default": "./dist/index.cjs"
+      },
+      "import": {
+        "types": "./dist/index.d.ts",
+        "default": "./dist/index.js"
+      }
+    }
+  },
+  "files": [
+    "dist"
+  ],
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/run-llama/LlamaIndexTS.git",
+    "directory": "packages/providers/fireworks"
+  },
+  "scripts": {
+    "build": "bunchee",
+    "dev": "bunchee --watch"
+  },
+  "devDependencies": {
+    "bunchee": "6.3.4"
+  },
+  "dependencies": {
+    "@llamaindex/env": "workspace:*",
+    "@llamaindex/openai": "workspace:*"
+  }
+}
diff --git a/packages/llamaindex/src/embeddings/fireworks.ts b/packages/providers/fireworks/src/embedding.ts
similarity index 100%
rename from packages/llamaindex/src/embeddings/fireworks.ts
rename to packages/providers/fireworks/src/embedding.ts
diff --git a/packages/providers/fireworks/src/index.ts b/packages/providers/fireworks/src/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..90b741f1e2312161635ee068cc6e1fc14d865b88
--- /dev/null
+++ b/packages/providers/fireworks/src/index.ts
@@ -0,0 +1,2 @@
+export * from "./embedding";
+export * from "./llm";
diff --git a/packages/llamaindex/src/llm/fireworks.ts b/packages/providers/fireworks/src/llm.ts
similarity index 100%
rename from packages/llamaindex/src/llm/fireworks.ts
rename to packages/providers/fireworks/src/llm.ts
diff --git a/packages/providers/fireworks/tsconfig.json b/packages/providers/fireworks/tsconfig.json
new file mode 100644
index 0000000000000000000000000000000000000000..bd8900e3ac6680d07b9e885f4121a6834bf60d68
--- /dev/null
+++ b/packages/providers/fireworks/tsconfig.json
@@ -0,0 +1,19 @@
+{
+  "extends": "../../../tsconfig.json",
+  "compilerOptions": {
+    "target": "ESNext",
+    "module": "ESNext",
+    "moduleResolution": "bundler",
+    "outDir": "./lib",
+    "tsBuildInfoFile": "./lib/.tsbuildinfo"
+  },
+  "include": ["./src"],
+  "references": [
+    {
+      "path": "../openai/tsconfig.json"
+    },
+    {
+      "path": "../../env/tsconfig.json"
+    }
+  ]
+}
diff --git a/packages/providers/jinaai/package.json b/packages/providers/jinaai/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..deb2cdd89364bfeec0ddb7d71c2f1e56fafa5238
--- /dev/null
+++ b/packages/providers/jinaai/package.json
@@ -0,0 +1,40 @@
+{
+  "name": "@llamaindex/jinaai",
+  "description": "JinaAI Adapter for LlamaIndex",
+  "version": "0.0.1",
+  "type": "module",
+  "main": "./dist/index.cjs",
+  "module": "./dist/index.js",
+  "exports": {
+    ".": {
+      "require": {
+        "types": "./dist/index.d.cts",
+        "default": "./dist/index.cjs"
+      },
+      "import": {
+        "types": "./dist/index.d.ts",
+        "default": "./dist/index.js"
+      }
+    }
+  },
+  "files": [
+    "dist"
+  ],
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/run-llama/LlamaIndexTS.git",
+    "directory": "packages/providers/jinaai"
+  },
+  "scripts": {
+    "build": "bunchee",
+    "dev": "bunchee --watch"
+  },
+  "devDependencies": {
+    "bunchee": "6.3.4"
+  },
+  "dependencies": {
+    "@llamaindex/core": "workspace:*",
+    "@llamaindex/env": "workspace:*",
+    "@llamaindex/openai": "workspace:*"
+  }
+}
diff --git a/packages/llamaindex/src/embeddings/JinaAIEmbedding.ts b/packages/providers/jinaai/src/embedding.ts
similarity index 97%
rename from packages/llamaindex/src/embeddings/JinaAIEmbedding.ts
rename to packages/providers/jinaai/src/embedding.ts
index 9f7f3d705ab96ad73200ab219496a942d0ccd42a..6ad09293a7b4c911362154c594ba3bfd85733892 100644
--- a/packages/llamaindex/src/embeddings/JinaAIEmbedding.ts
+++ b/packages/providers/jinaai/src/embedding.ts
@@ -1,7 +1,7 @@
 import { MultiModalEmbedding } from "@llamaindex/core/embeddings";
+import type { ImageType } from "@llamaindex/core/schema";
+import { imageToDataUrl } from "@llamaindex/core/utils";
 import { getEnv } from "@llamaindex/env";
-import { imageToDataUrl } from "../internal/utils.js";
-import type { ImageType } from "../Node.js";
 
 function isLocal(url: ImageType): boolean {
   if (url instanceof Blob) return true;
diff --git a/packages/providers/jinaai/src/index.ts b/packages/providers/jinaai/src/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..5b00d3610f478caa7eaf3d077fbe588c76cf6d71
--- /dev/null
+++ b/packages/providers/jinaai/src/index.ts
@@ -0,0 +1 @@
+export * from "./embedding";
diff --git a/packages/providers/jinaai/tsconfig.json b/packages/providers/jinaai/tsconfig.json
new file mode 100644
index 0000000000000000000000000000000000000000..bd8900e3ac6680d07b9e885f4121a6834bf60d68
--- /dev/null
+++ b/packages/providers/jinaai/tsconfig.json
@@ -0,0 +1,22 @@
+{
+  "extends": "../../../tsconfig.json",
+  "compilerOptions": {
+    "target": "ESNext",
+    "module": "ESNext",
+    "moduleResolution": "bundler",
+    "outDir": "./lib",
+    "tsBuildInfoFile": "./lib/.tsbuildinfo"
+  },
+  "include": ["./src"],
+  "references": [
+    {
+      "path": "../openai/tsconfig.json"
+    },
+    {
+      "path": "../../env/tsconfig.json"
+    }
+  ]
+}
diff --git a/packages/providers/together/package.json b/packages/providers/together/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..6a5fce0debe9eba47b1c6e3fa3f1acb2da086fc4
--- /dev/null
+++ b/packages/providers/together/package.json
@@ -0,0 +1,39 @@
+{
+  "name": "@llamaindex/together",
+  "description": "Together Adapter for LlamaIndex",
+  "version": "0.0.1",
+  "type": "module",
+  "main": "./dist/index.cjs",
+  "module": "./dist/index.js",
+  "exports": {
+    ".": {
+      "require": {
+        "types": "./dist/index.d.cts",
+        "default": "./dist/index.cjs"
+      },
+      "import": {
+        "types": "./dist/index.d.ts",
+        "default": "./dist/index.js"
+      }
+    }
+  },
+  "files": [
+    "dist"
+  ],
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/run-llama/LlamaIndexTS.git",
+    "directory": "packages/providers/together"
+  },
+  "scripts": {
+    "build": "bunchee",
+    "dev": "bunchee --watch"
+  },
+  "devDependencies": {
+    "bunchee": "6.3.4"
+  },
+  "dependencies": {
+    "@llamaindex/env": "workspace:*",
+    "@llamaindex/openai": "workspace:*"
+  }
+}
diff --git a/packages/llamaindex/src/embeddings/together.ts b/packages/providers/together/src/embedding.ts
similarity index 100%
rename from packages/llamaindex/src/embeddings/together.ts
rename to packages/providers/together/src/embedding.ts
diff --git a/packages/providers/together/src/index.ts b/packages/providers/together/src/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..90b741f1e2312161635ee068cc6e1fc14d865b88
--- /dev/null
+++ b/packages/providers/together/src/index.ts
@@ -0,0 +1,2 @@
+export * from "./embedding";
+export * from "./llm";
diff --git a/packages/llamaindex/src/llm/together.ts b/packages/providers/together/src/llm.ts
similarity index 100%
rename from packages/llamaindex/src/llm/together.ts
rename to packages/providers/together/src/llm.ts
diff --git a/packages/providers/together/tsconfig.json b/packages/providers/together/tsconfig.json
new file mode 100644
index 0000000000000000000000000000000000000000..3fad47fb71026680ff194da502936fedd967972a
--- /dev/null
+++ b/packages/providers/together/tsconfig.json
@@ -0,0 +1,19 @@
+{
+  "extends": "../../../tsconfig.json",
+  "compilerOptions": {
+    "target": "ESNext",
+    "module": "ESNext",
+    "moduleResolution": "bundler",
+    "outDir": "./lib",
+    "tsBuildInfoFile": "./lib/.tsbuildinfo"
+  },
+  "include": ["./src"],
+  "references": [
+    {
+      "path": "../../core/tsconfig.json"
+    },
+    {
+      "path": "../../env/tsconfig.json"
+    }
+  ]
+}
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 925315405661d3f6ba91906de7d49fa8ed50cfba..4114b9a56693076222e593744a10e68803f778de 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -622,12 +622,18 @@ importers:
       '@llamaindex/deepinfra':
         specifier: ^0.0.42
         version: link:../packages/providers/deepinfra
+      '@llamaindex/deepseek':
+        specifier: ^0.0.1
+        version: link:../packages/providers/deepseek
       '@llamaindex/env':
         specifier: ^0.1.29
         version: link:../packages/env
       '@llamaindex/firestore':
         specifier: ^1.0.4
         version: link:../packages/providers/storage/firestore
+      '@llamaindex/fireworks':
+        specifier: ^0.0.1
+        version: link:../packages/providers/fireworks
       '@llamaindex/google':
         specifier: ^0.0.13
         version: link:../packages/providers/google
@@ -637,6 +643,9 @@ importers:
       '@llamaindex/huggingface':
         specifier: ^0.0.42
         version: link:../packages/providers/huggingface
+      '@llamaindex/jinaai':
+        specifier: ^0.0.1
+        version: link:../packages/providers/jinaai
       '@llamaindex/milvus':
         specifier: ^0.1.6
         version: link:../packages/providers/storage/milvus
@@ -676,6 +685,9 @@ importers:
       '@llamaindex/replicate':
         specifier: ^0.0.39
         version: link:../packages/providers/replicate
+      '@llamaindex/together':
+        specifier: ^0.0.1
+        version: link:../packages/providers/together
       '@llamaindex/upstash':
         specifier: ^0.0.11
         version: link:../packages/providers/storage/upstash
@@ -1218,6 +1230,32 @@ importers:
         specifier: 6.3.4
         version: 6.3.4(patch_hash=pavboztthlgni7m5gzw7643oru)(typescript@5.7.3)
 
+  packages/providers/deepseek:
+    dependencies:
+      '@llamaindex/env':
+        specifier: workspace:*
+        version: link:../../env
+      '@llamaindex/openai':
+        specifier: workspace:*
+        version: link:../openai
+    devDependencies:
+      bunchee:
+        specifier: 6.3.4
+        version: 6.3.4(patch_hash=pavboztthlgni7m5gzw7643oru)(typescript@5.7.3)
+
+  packages/providers/fireworks:
+    dependencies:
+      '@llamaindex/env':
+        specifier: workspace:*
+        version: link:../../env
+      '@llamaindex/openai':
+        specifier: workspace:*
+        version: link:../openai
+    devDependencies:
+      bunchee:
+        specifier: 6.3.4
+        version: 6.3.4(patch_hash=pavboztthlgni7m5gzw7643oru)(typescript@5.7.3)
+
   packages/providers/google:
     dependencies:
       '@google-cloud/vertexai':
@@ -1275,6 +1313,22 @@ importers:
         specifier: 6.3.4
         version: 6.3.4(patch_hash=pavboztthlgni7m5gzw7643oru)(typescript@5.7.3)
 
+  packages/providers/jinaai:
+    dependencies:
+      '@llamaindex/core':
+        specifier: workspace:*
+        version: link:../../core
+      '@llamaindex/env':
+        specifier: workspace:*
+        version: link:../../env
+      '@llamaindex/openai':
+        specifier: workspace:*
+        version: link:../openai
+    devDependencies:
+      bunchee:
+        specifier: 6.3.4
+        version: 6.3.4(patch_hash=pavboztthlgni7m5gzw7643oru)(typescript@5.7.3)
+
   packages/providers/mistral:
     dependencies:
       '@llamaindex/core':
@@ -1607,6 +1661,19 @@ importers:
         specifier: 6.3.4
         version: 6.3.4(patch_hash=pavboztthlgni7m5gzw7643oru)(typescript@5.7.3)
 
+  packages/providers/together:
+    dependencies:
+      '@llamaindex/env':
+        specifier: workspace:*
+        version: link:../../env
+      '@llamaindex/openai':
+        specifier: workspace:*
+        version: link:../openai
+    devDependencies:
+      bunchee:
+        specifier: 6.3.4
+        version: 6.3.4(patch_hash=pavboztthlgni7m5gzw7643oru)(typescript@5.7.3)
+
   packages/providers/vercel:
     dependencies:
       '@llamaindex/core':
diff --git a/tsconfig.json b/tsconfig.json
index cca56fe091553502103c0fb0ee2de42121af8f4d..8af5db8f315d43f4d3253d106660a195a47b382d 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -175,6 +175,18 @@
     },
     {
       "path": "./packages/providers/voyage-ai/tsconfig.json"
+    },
+    {
+      "path": "./packages/providers/deepseek/tsconfig.json"
+    },
+    {
+      "path": "./packages/providers/fireworks/tsconfig.json"
+    },
+    {
+      "path": "./packages/providers/together/tsconfig.json"
+    },
+    {
+      "path": "./packages/providers/jinaai/tsconfig.json"
     }
   ]
 }