diff --git a/apps/docs/docs/modules/embeddings/_category_.yml b/apps/docs/docs/modules/embeddings/_category_.yml
new file mode 100644
index 0000000000000000000000000000000000000000..aa202851b15e22493d9fbc19b71b838d9c52847f
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/_category_.yml
@@ -0,0 +1,2 @@
+label: "Embeddings"
+position: 3
diff --git a/apps/docs/docs/modules/embeddings/available_embeddings/_category_.yml b/apps/docs/docs/modules/embeddings/available_embeddings/_category_.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d401bda9130344314a4fa6c12f1600cd3f006d46
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/available_embeddings/_category_.yml
@@ -0,0 +1 @@
+label: "Available Embeddings"
diff --git a/apps/docs/docs/modules/embeddings/available_embeddings/huggingface.md b/apps/docs/docs/modules/embeddings/available_embeddings/huggingface.md
new file mode 100644
index 0000000000000000000000000000000000000000..cb058b033068a6736619fe36c30b26515f3d4100
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/available_embeddings/huggingface.md
@@ -0,0 +1,25 @@
+# HuggingFace
+
+To use HuggingFace embeddings, you need to import `HuggingFaceEmbedding` from `llamaindex`.
+
+```ts
+import {
+  Document,
+  HuggingFaceEmbedding,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+const huggingFaceEmbeds = new HuggingFaceEmbedding();
+
+const serviceContext = serviceContextFromDefaults({
+  embedModel: huggingFaceEmbeds,
+});
+
+const essay = "..."; // replace with your own text
+
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
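+
+// Print the response text from the query engine.
+console.log(results.response);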
+```
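+
+You can also compute an embedding directly, without building an index. A minimal sketch using the embedding model created above:
+
+```ts
+// Returns the embedding vector for the given text.
+const vector = await huggingFaceEmbeds.getTextEmbedding("Hello, world");
+console.log(vector.length);
+```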
diff --git a/apps/docs/docs/modules/embeddings/available_embeddings/mistral.md b/apps/docs/docs/modules/embeddings/available_embeddings/mistral.md
new file mode 100644
index 0000000000000000000000000000000000000000..ee2da3a3329865e3555329f46305b74b80d40223
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/available_embeddings/mistral.md
@@ -0,0 +1,29 @@
+# MistralAI
+
+To use MistralAI embeddings, you need to import `MistralAIEmbedding` from `llamaindex`.
+
+```ts
+import {
+  Document,
+  MistralAIEmbedding,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+const mistralEmbedModel = new MistralAIEmbedding({
+  apiKey: "<YOUR_API_KEY>",
+});
+
+const serviceContext = serviceContextFromDefaults({
+  embedModel: mistralEmbedModel,
+});
+
+const essay = "..."; // replace with your own text
+
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
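+
+// Print the response text from the query engine.
+console.log(results.response);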
+```
diff --git a/apps/docs/docs/modules/embeddings/available_embeddings/ollama.md b/apps/docs/docs/modules/embeddings/available_embeddings/ollama.md
new file mode 100644
index 0000000000000000000000000000000000000000..7ccd8eeb498048376004b31eb35783e00ff3e502
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/available_embeddings/ollama.md
@@ -0,0 +1,27 @@
+# Ollama
+
+To use Ollama embeddings, you need to import `Ollama` from `llamaindex`.
+
+```ts
+import {
+  Document,
+  Ollama,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+// Requires a running local Ollama server; pass the model you have pulled.
+const ollamaEmbedModel = new Ollama({ model: "llama2" });
+
+const serviceContext = serviceContextFromDefaults({
+  embedModel: ollamaEmbedModel,
+});
+
+const essay = "..."; // replace with your own text
+
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
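+
+// Print the response text from the query engine.
+console.log(results.response);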
+```
diff --git a/apps/docs/docs/modules/embeddings/available_embeddings/openai.md b/apps/docs/docs/modules/embeddings/available_embeddings/openai.md
new file mode 100644
index 0000000000000000000000000000000000000000..20e9c864cd0dbfb4b7c15b98bd29ed0868a6c5ab
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/available_embeddings/openai.md
@@ -0,0 +1,27 @@
+# OpenAI
+
+To use OpenAI embeddings, you need to import `OpenAIEmbedding` from `llamaindex`.
+
+```ts
+import {
+  Document,
+  OpenAIEmbedding,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+const openaiEmbedModel = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({
+  embedModel: openaiEmbedModel,
+});
+
+const essay = "..."; // replace with your own text
+
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
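+
+// Print the response text from the query engine.
+console.log(results.response);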
+```
diff --git a/apps/docs/docs/modules/embeddings/available_embeddings/together.md b/apps/docs/docs/modules/embeddings/available_embeddings/together.md
new file mode 100644
index 0000000000000000000000000000000000000000..755709a8bfe0f88594778eecd16f0e1223fe2119
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/available_embeddings/together.md
@@ -0,0 +1,29 @@
+# Together
+
+To use Together AI embeddings, you need to import `TogetherEmbedding` from `llamaindex`.
+
+```ts
+import {
+  Document,
+  TogetherEmbedding,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+const togetherEmbedModel = new TogetherEmbedding({
+  apiKey: "<YOUR_API_KEY>",
+});
+
+const serviceContext = serviceContextFromDefaults({
+  embedModel: togetherEmbedModel,
+});
+
+const essay = "..."; // replace with your own text
+
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
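+
+// Print the response text from the query engine.
+console.log(results.response);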
+```
diff --git a/apps/docs/docs/modules/embeddings/index.md b/apps/docs/docs/modules/embeddings/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..cfac6028efbad319ae609ecc02714021baa4cb54
--- /dev/null
+++ b/apps/docs/docs/modules/embeddings/index.md
@@ -0,0 +1,22 @@
+# Embedding
+
+The embedding model in LlamaIndex is responsible for creating numerical representations of text. By default, LlamaIndex will use the `text-embedding-ada-002` model from OpenAI.
+
+This can be explicitly set in the `ServiceContext` object.
+
+```typescript
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const openaiEmbeds = new OpenAIEmbedding();
+
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
+```
+
+## Local Embedding
+
+For local embeddings, you can use the [HuggingFace](./available_embeddings/huggingface.md) embedding model.
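+
+A minimal sketch (the default model is downloaded and runs locally, so no API key is required):
+
+```typescript
+import { HuggingFaceEmbedding, serviceContextFromDefaults } from "llamaindex";
+
+const serviceContext = serviceContextFromDefaults({
+  embedModel: new HuggingFaceEmbedding(),
+});
+```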
+
+## API Reference
+
+- [OpenAIEmbedding](../api/classes/OpenAIEmbedding.md)
+- [ServiceContext](../api/interfaces/ServiceContext.md)
diff --git a/apps/docs/docs/modules/llms/index.md b/apps/docs/docs/modules/llms/index.md
index 6db0a800ca91661d78b32dc78bb1b19e059750f6..84382f95664d34f6451f3610483dd988ae42610c 100644
--- a/apps/docs/docs/modules/llms/index.md
+++ b/apps/docs/docs/modules/llms/index.md
@@ -28,6 +28,10 @@ export AZURE_OPENAI_ENDPOINT="<YOUR ENDPOINT, see https://learn.microsoft.com/en
 export AZURE_OPENAI_DEPLOYMENT="gpt-4" # or some other deployment name
 ```
 
+## Local LLM
+
+For local LLMs, we currently recommend using the [Ollama](./available_llms/ollama.md) LLM.
+
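+A minimal sketch, assuming a local Ollama server with the `llama2` model pulled:
+
+```ts
+import { Ollama, serviceContextFromDefaults } from "llamaindex";
+
+const ollamaLLM = new Ollama({ model: "llama2" });
+
+const serviceContext = serviceContextFromDefaults({ llm: ollamaLLM });
+```
+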
 ## API Reference
 
 - [OpenAI](../api/classes/OpenAI.md)