From b4c6d509a05d5bd428494f258fb6da496ab5a3a1 Mon Sep 17 00:00:00 2001
From: Emanuel Ferreira <contatoferreirads@gmail.com>
Date: Sat, 10 Feb 2024 22:10:20 -0300
Subject: [PATCH] docs: available embeddings (#538)

---
 .../docs/modules/embeddings/_category_.yml    |  2 ++
 .../available_embeddings/_category_.yml       |  1 +
 .../available_embeddings/huggingface.md       | 25 ++++++++++++++++
 .../available_embeddings/mistral.md           | 29 +++++++++++++++++++
 .../embeddings/available_embeddings/ollama.md | 27 +++++++++++++++++
 .../embeddings/available_embeddings/openai.md | 27 +++++++++++++++++
 .../available_embeddings/together.md          | 29 +++++++++++++++++++
 apps/docs/docs/modules/embeddings/index.md    | 22 ++++++++++++++
 apps/docs/docs/modules/llms/index.md          |  4 +++
 9 files changed, 166 insertions(+)
 create mode 100644 apps/docs/docs/modules/embeddings/_category_.yml
 create mode 100644 apps/docs/docs/modules/embeddings/available_embeddings/_category_.yml
 create mode 100644 apps/docs/docs/modules/embeddings/available_embeddings/huggingface.md
 create mode 100644 apps/docs/docs/modules/embeddings/available_embeddings/mistral.md
 create mode 100644 apps/docs/docs/modules/embeddings/available_embeddings/ollama.md
 create mode 100644 apps/docs/docs/modules/embeddings/available_embeddings/openai.md
 create mode 100644 apps/docs/docs/modules/embeddings/available_embeddings/together.md
 create mode 100644 apps/docs/docs/modules/embeddings/index.md

diff --git a/apps/docs/docs/modules/embeddings/_category_.yml b/apps/docs/docs/modules/embeddings/_category_.yml
new file mode 100644
index 000000000..aa202851b
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/_category_.yml
@@ -0,0 +1,2 @@
+label: "Embeddings"
+position: 3
diff --git a/apps/docs/docs/modules/embeddings/available_embeddings/_category_.yml b/apps/docs/docs/modules/embeddings/available_embeddings/_category_.yml
new file mode 100644
index 000000000..d401bda91
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/available_embeddings/_category_.yml
@@ -0,0 +1 @@
+label: "Available Embeddings"
diff --git a/apps/docs/docs/modules/embeddings/available_embeddings/huggingface.md b/apps/docs/docs/modules/embeddings/available_embeddings/huggingface.md
new file mode 100644
index 000000000..cb058b033
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/available_embeddings/huggingface.md
@@ -0,0 +1,25 @@
+# HuggingFace
+
+To use HuggingFace embeddings, you need to import `HuggingFaceEmbedding` from `llamaindex`.
+
+```ts
+import { HuggingFaceEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const huggingFaceEmbeds = new HuggingFaceEmbedding();
+
+const serviceContext = serviceContextFromDefaults({
+  embedModel: huggingFaceEmbeds,
+});
+
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
+```
diff --git a/apps/docs/docs/modules/embeddings/available_embeddings/mistral.md b/apps/docs/docs/modules/embeddings/available_embeddings/mistral.md
new file mode 100644
index 000000000..ee2da3a33
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/available_embeddings/mistral.md
@@ -0,0 +1,29 @@
+# MistralAI
+
+To use MistralAI embeddings, you need to import `MistralAIEmbedding` from `llamaindex`.
+
+```ts
+import { MistralAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const mistralEmbedModel = new MistralAIEmbedding({
+  apiKey: "<YOUR_API_KEY>",
+});
+
+const serviceContext = serviceContextFromDefaults({
+  embedModel: mistralEmbedModel,
+});
+
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
+```
diff --git a/apps/docs/docs/modules/embeddings/available_embeddings/ollama.md b/apps/docs/docs/modules/embeddings/available_embeddings/ollama.md
new file mode 100644
index 000000000..7ccd8eeb4
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/available_embeddings/ollama.md
@@ -0,0 +1,27 @@
+# Ollama
+
+To use Ollama embeddings, you need to import `Ollama` from `llamaindex`.
+
+```ts
+import { Ollama, serviceContextFromDefaults } from "llamaindex";
+
+const ollamaEmbedModel = new Ollama();
+
+const serviceContext = serviceContextFromDefaults({
+  embedModel: ollamaEmbedModel,
+});
+
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
+```
diff --git a/apps/docs/docs/modules/embeddings/available_embeddings/openai.md b/apps/docs/docs/modules/embeddings/available_embeddings/openai.md
new file mode 100644
index 000000000..20e9c864c
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/available_embeddings/openai.md
@@ -0,0 +1,27 @@
+# OpenAI
+
+To use OpenAI embeddings, you need to import `OpenAIEmbedding` from `llamaindex`.
+
+```ts
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbedModel = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({
+  embedModel: openaiEmbedModel,
+});
+
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
+```
diff --git a/apps/docs/docs/modules/embeddings/available_embeddings/together.md b/apps/docs/docs/modules/embeddings/available_embeddings/together.md
new file mode 100644
index 000000000..755709a8b
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/available_embeddings/together.md
@@ -0,0 +1,29 @@
+# Together
+
+To use Together embeddings, you need to import `TogetherEmbedding` from `llamaindex`.
+
+```ts
+import { TogetherEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const togetherEmbedModel = new TogetherEmbedding({
+  apiKey: "<YOUR_API_KEY>",
+});
+
+const serviceContext = serviceContextFromDefaults({
+  embedModel: togetherEmbedModel,
+});
+
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
+```
diff --git a/apps/docs/docs/modules/embeddings/index.md b/apps/docs/docs/modules/embeddings/index.md
new file mode 100644
index 000000000..cfac6028e
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/index.md
@@ -0,0 +1,22 @@
+# Embedding
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## Local Embedding
+
+For local embeddings, you can use the [HuggingFace](./available_embeddings/huggingface.md) embedding model.
+
+## API Reference
+
+- [OpenAIEmbedding](../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/docs/modules/llms/index.md b/apps/docs/docs/modules/llms/index.md
index 6db0a800c..84382f956 100644
--- a/apps/docs/docs/modules/llms/index.md
+++ b/apps/docs/docs/modules/llms/index.md
@@ -28,6 +28,10 @@ export AZURE_OPENAI_ENDPOINT="<YOUR ENDPOINT, see https://learn.microsoft.com/en
 export AZURE_OPENAI_DEPLOYMENT="gpt-4" # or some other deployment name
 ```
 
+## Local LLM
+
+For local LLMs, currently we recommend the use of [Ollama](./available_llms/ollama.md) LLM.
+
 ## API Reference
 
 - [OpenAI](../api/classes/OpenAI.md)
-- 
GitLab