Skip to content
Snippets Groups Projects
Unverified Commit bbc8c878 authored by Thuc Pham's avatar Thuc Pham Committed by GitHub
Browse files

fix: prefer using embedding model from vector store (#1708)

parent 4b49428f
No related branches found
No related tags found
No related merge requests found
Showing
with 143 additions and 39 deletions
---
"llamaindex": patch
"@llamaindex/deepseek": patch
"@llamaindex/fireworks": patch
"@llamaindex/together": patch
"@llamaindex/jinaai": patch
---
fix: prefer using embedding model from vector store
......@@ -2,10 +2,11 @@
title: Jina AI
---
To use Jina AI embeddings, you need to import `JinaAIEmbedding` from `llamaindex`.
To use Jina AI embeddings, you need to import `JinaAIEmbedding` from `@llamaindex/jinaai`.
```ts
import { JinaAIEmbedding, Settings } from "llamaindex";
import { Settings } from "llamaindex";
import { JinaAIEmbedding } from "@llamaindex/jinaai";
Settings.embedModel = new JinaAIEmbedding();
......
......@@ -2,10 +2,11 @@
title: Together
---
To use together embeddings, you need to import `TogetherEmbedding` from `llamaindex`.
To use together embeddings, you need to import `TogetherEmbedding` from `@llamaindex/together`.
```ts
import { TogetherEmbedding, Settings } from "llamaindex";
import { Settings } from "llamaindex";
import { TogetherEmbedding } from "@llamaindex/together";
Settings.embedModel = new TogetherEmbedding({
apiKey: "<YOUR_API_KEY>",
......
......@@ -7,7 +7,8 @@ title: DeepSeek LLM
## Usage
```ts
import { DeepSeekLLM, Settings } from "llamaindex";
import { Settings } from "llamaindex";
import { DeepSeekLLM } from "@llamaindex/deepseek";
Settings.llm = new DeepSeekLLM({
apiKey: "<YOUR_API_KEY>",
......@@ -18,7 +19,8 @@ Settings.llm = new DeepSeekLLM({
## Example
```ts
import { DeepSeekLLM, Document, VectorStoreIndex, Settings } from "llamaindex";
import { Document, VectorStoreIndex, Settings } from "llamaindex";
import { DeepSeekLLM } from "@llamaindex/deepseek";
const deepseekLlm = new DeepSeekLLM({
apiKey: "<YOUR_API_KEY>",
......
......@@ -7,7 +7,8 @@ title: Fireworks LLM
## Usage
```ts
import { FireworksLLM, Settings } from "llamaindex";
import { Settings } from "llamaindex";
import { FireworksLLM } from "@llamaindex/fireworks";
Settings.llm = new FireworksLLM({
apiKey: "<YOUR_API_KEY>",
......
......@@ -23,7 +23,8 @@ import { Tab, Tabs } from "fumadocs-ui/components/tabs";
## Usage
```ts
import { Settings, TogetherLLM } from "llamaindex";
import { Settings } from "llamaindex";
import { TogetherLLM } from "@llamaindex/together";
Settings.llm = new TogetherLLM({
apiKey: "<YOUR_API_KEY>",
......
import { DeepSeekLLM } from "@llamaindex/deepseek";
// process.env.DEEPSEEK_API_KEY is required
// Example: talking to DeepSeek via the LlamaIndex DeepSeekLLM client,
// first as a single buffered response, then as a token stream.
// process.env.DEEPSEEK_API_KEY is required
const deepseek = new DeepSeekLLM({
  apiKey: process.env.DEEPSEEK_API_KEY,
  model: "deepseek-coder", // or "deepseek-chat"
});

// Build the two-message conversation (system prompt + user prompt)
// shared by both examples below.
const buildMessages = (userContent: string) => [
  {
    role: "system" as const,
    content: "You are an AI assistant",
  },
  {
    role: "user" as const,
    content: userContent,
  },
];

async function main() {
  // Example of non-streaming chat: the full response arrives at once.
  const response = await deepseek.chat({
    messages: buildMessages("Tell me about San Francisco"),
    stream: false,
  });
  console.log("Response from DeepSeek AI:");
  console.log(response);

  // Example of streaming chat: deltas are written as they arrive.
  const generator = await deepseek.chat({
    messages: buildMessages("Write a short poem about San Francisco"),
    stream: true,
  });
  console.log("\nStreaming response from DeepSeek AI...");
  for await (const message of generator) {
    process.stdout.write(message.delta);
  }
  console.log("\n");
}

void main();
import { JinaAIEmbedding } from "@llamaindex/jinaai";
import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
import {
ImageDocument,
JinaAIEmbedding,
similarity,
SimilarityType,
} from "llamaindex";
import { ImageDocument, similarity, SimilarityType } from "llamaindex";
import path from "path";
async function main() {
......
......@@ -44,6 +44,10 @@
"@llamaindex/deepseek": "^0.0.1",
"@llamaindex/fireworks": "^0.0.1",
"@llamaindex/jinaai": "^0.0.1",
"@llamaindex/together": "^0.0.1",
"@llamaindex/voyage-ai": "^1.0.3",
"@llamaindex/weaviate": "^0.0.11",
"@llamaindex/workflow": "^0.0.13",
"@notionhq/client": "^2.2.15",
"@pinecone-database/pinecone": "^4.0.0",
"@vercel/postgres": "^0.10.0",
......
import {
GEMINI_EMBEDDING_MODEL,
GeminiEmbedding,
GeminiSession,
} from "@llamaindex/google";
import { QdrantVectorStore } from "@llamaindex/qdrant";
import {
Document,
storageContextFromDefaults,
VectorStoreIndex,
} from "llamaindex";
// Gemini embedding model backed by an explicit session so the API key
// is passed deliberately rather than read implicitly from the environment.
const embedding = new GeminiEmbedding({
  model: GEMINI_EMBEDDING_MODEL.EMBEDDING_001,
  session: new GeminiSession({
    apiKey: process.env.GEMINI_API_KEY,
  }),
});

/**
 * Smoke-tests the Gemini + Qdrant integration: attaches the embedding
 * model to the vector store (exercising the "prefer embedding model from
 * vector store" behavior) and indexes a single document.
 */
async function main() {
  const docs = [new Document({ text: "Lorem ipsum dolor sit amet" })];
  const vectorStore = new QdrantVectorStore({
    url: process.env.QDRANT_URL,
    apiKey: process.env.QDRANT_API_KEY,
    // The store-level embedding model takes precedence over Settings.embedModel.
    embeddingModel: embedding,
    collectionName: "gemini_test",
  });
  const storageContext = await storageContextFromDefaults({ vectorStore });
  await VectorStoreIndex.fromDocuments(docs, { storageContext });
  // Fixed typo: "Inizialized" -> "Initialized".
  console.log("Initialized vector store successfully");
}
void main().catch((err) => console.error(err));
import { JinaAIEmbedding } from "@llamaindex/jinaai";
import { QdrantVectorStore } from "@llamaindex/qdrant";
import {
Document,
storageContextFromDefaults,
VectorStoreIndex,
} from "llamaindex";
// Jina AI embedding model; requires JINAAI_API_KEY in the environment.
const embedding = new JinaAIEmbedding({
  apiKey: process.env.JINAAI_API_KEY,
  model: "jina-embeddings-v3",
});

/**
 * Smoke-tests the Jina AI + Qdrant integration: attaches the embedding
 * model to the vector store (exercising the "prefer embedding model from
 * vector store" behavior) and indexes a single document.
 */
async function main() {
  const docs = [new Document({ text: "Lorem ipsum dolor sit amet" })];
  const vectorStore = new QdrantVectorStore({
    url: process.env.QDRANT_URL,
    apiKey: process.env.QDRANT_API_KEY,
    // The store-level embedding model takes precedence over Settings.embedModel.
    embeddingModel: embedding,
    collectionName: "jina_test",
  });
  const storageContext = await storageContextFromDefaults({ vectorStore });
  await VectorStoreIndex.fromDocuments(docs, { storageContext });
  // Fixed typo: "Inizialized" -> "Initialized".
  console.log("Initialized vector store successfully");
}
void main().catch((err) => console.error(err));
import { FireworksEmbedding, FireworksLLM } from "@llamaindex/fireworks";
import { PDFReader } from "@llamaindex/readers/pdf";
import { FireworksEmbedding, FireworksLLM, VectorStoreIndex } from "llamaindex";
import { VectorStoreIndex } from "llamaindex";
import { Settings } from "llamaindex";
......
import { TogetherEmbedding, TogetherLLM } from "llamaindex";
import { TogetherEmbedding, TogetherLLM } from "@llamaindex/together";
// process.env.TOGETHER_API_KEY is required
const together = new TogetherLLM({
......
import fs from "node:fs/promises";
import {
Document,
Settings,
TogetherEmbedding,
TogetherLLM,
VectorStoreIndex,
} from "llamaindex";
import { TogetherEmbedding, TogetherLLM } from "@llamaindex/together";
import { Document, Settings, VectorStoreIndex } from "llamaindex";
// Update llm to use TogetherAI
Settings.llm = new TogetherLLM({
......
export * from "@llamaindex/openai";
export * from "@llamaindex/core/embeddings";
export { FireworksEmbedding } from "./fireworks.js";
export * from "./JinaAIEmbedding.js";
export * from "./OpenAIEmbedding.js";
export { TogetherEmbedding } from "./together.js";
......@@ -22,6 +22,7 @@ export {
export * from "@llamaindex/core/agent";
export * from "@llamaindex/core/chat-engine";
export * from "@llamaindex/core/data-structs";
export * from "@llamaindex/core/embeddings";
export {
CallbackManager,
DEFAULT_BASE_URL,
......@@ -65,10 +66,10 @@ export * from "@llamaindex/core/storage/doc-store";
export * from "@llamaindex/core/storage/index-store";
export * from "@llamaindex/core/storage/kv-store";
export * from "@llamaindex/core/utils";
export * from "@llamaindex/openai";
export * from "@llamaindex/workflow/agent";
export * from "./agent/index.js";
export * from "./cloud/index.js";
export * from "./embeddings/index.js";
export * from "./engines/chat/index.js";
export * from "./engines/query/index.js";
export * from "./evaluation/index.js";
......@@ -76,7 +77,6 @@ export * from "./extractors/index.js";
export * from "./indices/index.js";
export * from "./ingestion/index.js";
export { imageToDataUrl } from "./internal/utils.js";
export * from "./llm/index.js";
export * from "./node-parser.js";
export * from "./objects/index.js";
export * from "./OutputParser.js";
......
export * from "./index.edge.js";
// TODO: clean up, move to jinaai package
export { JinaAIEmbedding } from "./embeddings/JinaAIEmbedding.js";
// Don't export file-system stores for non-node.js runtime on top level,
// as we cannot guarantee that they will work in other environments
export * from "./storage/index.js";
......
......@@ -175,7 +175,7 @@ export class VectorStoreIndex extends BaseIndex<IndexDict> {
for (const type in nodeMap) {
const nodes = nodeMap[type as ModalityType];
const embedModel =
this.embedModel ?? this.vectorStores[type as ModalityType]?.embedModel;
this.vectorStores[type as ModalityType]?.embedModel ?? this.embedModel;
if (embedModel && nodes) {
await embedModel(nodes, {
logProgress: options?.logProgress,
......
export { DeepSeekLLM } from "./deepseek.js";
export { FireworksLLM } from "./fireworks.js";
export * from "./openai.js";
export { TogetherLLM } from "./together.js";
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment