diff --git a/apps/docs/docs/modules/llms/_category_.yml b/apps/docs/docs/modules/llms/_category_.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b52ae1313733995288f9bac49d3b6e96ca932521
--- /dev/null
+++ b/apps/docs/docs/modules/llms/_category_.yml
@@ -0,0 +1,2 @@
+label: "LLMs"
+position: 3
diff --git a/apps/docs/docs/modules/llms/available_llms/_category_.yml b/apps/docs/docs/modules/llms/available_llms/_category_.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0d258f5cab9f532853b5b488ccfb30a0ae5daf12
--- /dev/null
+++ b/apps/docs/docs/modules/llms/available_llms/_category_.yml
@@ -0,0 +1 @@
+label: "Available LLMs"
diff --git a/apps/docs/docs/modules/llms/available_llms/anthropic.md b/apps/docs/docs/modules/llms/available_llms/anthropic.md
new file mode 100644
index 0000000000000000000000000000000000000000..dd83f5207fb9e96d07d797a24f52404513e0fa4f
--- /dev/null
+++ b/apps/docs/docs/modules/llms/available_llms/anthropic.md
@@ -0,0 +1,80 @@
+# Anthropic
+
+## Usage
+
+```ts
+import { Anthropic, serviceContextFromDefaults } from "llamaindex";
+
+const anthropicLLM = new Anthropic({
+  apiKey: "<YOUR_API_KEY>",
+});
+
+const serviceContext = serviceContextFromDefaults({ llm: anthropicLLM });
+```
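+
+You can also read the key from an environment variable instead of hard-coding it:
+
+```ts
+const anthropicLLM = new Anthropic({
+  apiKey: process.env.ANTHROPIC_API_KEY,
+});
+```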
+
+## Load and index documents
+
+For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
+
+```ts
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+```
+
+## Query
+
+```ts
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
+```
+
+## Full Example
+
+```ts
+import fs from "node:fs/promises";
+import {
+  Anthropic,
+  Document,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+async function main() {
+  // Create an instance of the Anthropic LLM
+  const anthropicLLM = new Anthropic({
+    apiKey: "<YOUR_API_KEY>",
+  });
+
+  // Create a service context
+  const serviceContext = serviceContextFromDefaults({ llm: anthropicLLM });
+
+  // Load the essay text to index (assumes an essay.txt file in the working directory)
+  const essay = await fs.readFile("./essay.txt", "utf-8");
+  const document = new Document({ text: essay, id_: "essay" });
+
+  // Load and index documents
+  const index = await VectorStoreIndex.fromDocuments([document], {
+    serviceContext,
+  });
+
+  // Create a query engine
+  const queryEngine = index.asQueryEngine();
+
+  const query = "What is the meaning of life?";
+
+  // Query
+  const response = await queryEngine.query({
+    query,
+  });
+
+  // Log the response
+  console.log(response.response);
+}
+
+main().catch(console.error);
+```
diff --git a/apps/docs/docs/modules/llms/available_llms/azure.md b/apps/docs/docs/modules/llms/available_llms/azure.md
new file mode 100644
index 0000000000000000000000000000000000000000..f6ca3ef6a8c7c264f277c0288ac590a315eb3842
--- /dev/null
+++ b/apps/docs/docs/modules/llms/available_llms/azure.md
@@ -0,0 +1,88 @@
+# Azure OpenAI
+
+To use Azure OpenAI, you only need to set a few environment variables and then use the `OpenAI` class as usual.
+
+For example:
+
+## Environment Variables
+
+```
+export AZURE_OPENAI_KEY="<YOUR KEY HERE>"
+export AZURE_OPENAI_ENDPOINT="<YOUR ENDPOINT, see https://learn.microsoft.com/en-us/azure/ai-services/openai/quickstart?tabs=command-line%2Cpython&pivots=rest-api>"
+export AZURE_OPENAI_DEPLOYMENT="gpt-4" # or some other deployment name
+```
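+
+Before running, you can optionally verify that these variables are set (a minimal sketch using plain Node.js):
+
+```ts
+// Fail fast if a required Azure OpenAI variable is missing
+for (const name of [
+  "AZURE_OPENAI_KEY",
+  "AZURE_OPENAI_ENDPOINT",
+  "AZURE_OPENAI_DEPLOYMENT",
+]) {
+  if (!process.env[name]) {
+    throw new Error(`Missing environment variable: ${name}`);
+  }
+}
+```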
+
+## Usage
+
+```ts
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const azureOpenaiLLM = new OpenAI({ model: "gpt-4", temperature: 0 });
+
+const serviceContext = serviceContextFromDefaults({ llm: azureOpenaiLLM });
+```
+
+## Load and index documents
+
+For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
+
+```ts
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+```
+
+## Query
+
+```ts
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
+```
+
+## Full Example
+
+```ts
+import fs from "node:fs/promises";
+import {
+  Document,
+  OpenAI,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+async function main() {
+  // Create an instance of the LLM
+  const azureOpenaiLLM = new OpenAI({ model: "gpt-4", temperature: 0 });
+
+  // Create a service context
+  const serviceContext = serviceContextFromDefaults({ llm: azureOpenaiLLM });
+
+  // Load the essay text to index (assumes an essay.txt file in the working directory)
+  const essay = await fs.readFile("./essay.txt", "utf-8");
+  const document = new Document({ text: essay, id_: "essay" });
+
+  // Load and index documents
+  const index = await VectorStoreIndex.fromDocuments([document], {
+    serviceContext,
+  });
+
+  // Create a query engine
+  const queryEngine = index.asQueryEngine();
+
+  const query = "What is the meaning of life?";
+
+  // Query
+  const response = await queryEngine.query({
+    query,
+  });
+
+  // Log the response
+  console.log(response.response);
+}
+
+main().catch(console.error);
+```
diff --git a/apps/docs/docs/modules/llms/available_llms/llama2.md b/apps/docs/docs/modules/llms/available_llms/llama2.md
new file mode 100644
index 0000000000000000000000000000000000000000..65c26d6279c53f9870f7d20b2645c9dd12bded8b
--- /dev/null
+++ b/apps/docs/docs/modules/llms/available_llms/llama2.md
@@ -0,0 +1,97 @@
+# Llama2
+
+## Usage
+
+```ts
+import { DeuceChatStrategy, LlamaDeuce, serviceContextFromDefaults } from "llamaindex";
+
+const llama2LLM = new LlamaDeuce({ chatStrategy: DeuceChatStrategy.META });
+
+const serviceContext = serviceContextFromDefaults({ llm: llama2LLM });
+```
+
+## Usage with Replicate
+
+```ts
+import {
+  DeuceChatStrategy,
+  LlamaDeuce,
+  ReplicateSession,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+// Assumes your Replicate API token is available in the environment
+const replicateKey = process.env.REPLICATE_API_TOKEN;
+
+const replicateSession = new ReplicateSession({
+  replicateKey,
+});
+
+const llama2LLM = new LlamaDeuce({
+  chatStrategy: DeuceChatStrategy.META,
+  replicateSession,
+});
+
+const serviceContext = serviceContextFromDefaults({ llm: llama2LLM });
+```
+
+## Load and index documents
+
+For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
+
+```ts
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+```
+
+## Query
+
+```ts
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
+```
+
+## Full Example
+
+```ts
+import fs from "node:fs/promises";
+import {
+  DeuceChatStrategy,
+  Document,
+  LlamaDeuce,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+async function main() {
+  // Create an instance of the LLM
+  const llama2LLM = new LlamaDeuce({ chatStrategy: DeuceChatStrategy.META });
+
+  // Create a service context
+  const serviceContext = serviceContextFromDefaults({ llm: llama2LLM });
+
+  // Load the essay text to index (assumes an essay.txt file in the working directory)
+  const essay = await fs.readFile("./essay.txt", "utf-8");
+  const document = new Document({ text: essay, id_: "essay" });
+
+  // Load and index documents
+  const index = await VectorStoreIndex.fromDocuments([document], {
+    serviceContext,
+  });
+
+  // Create a query engine
+  const queryEngine = index.asQueryEngine();
+
+  const query = "What is the meaning of life?";
+
+  // Query
+  const response = await queryEngine.query({
+    query,
+  });
+
+  // Log the response
+  console.log(response.response);
+}
+
+main().catch(console.error);
+```
diff --git a/apps/docs/docs/modules/llms/available_llms/mistral.md b/apps/docs/docs/modules/llms/available_llms/mistral.md
new file mode 100644
index 0000000000000000000000000000000000000000..a928d75488fff761afe9a65bcca0244ee78f1f3b
--- /dev/null
+++ b/apps/docs/docs/modules/llms/available_llms/mistral.md
@@ -0,0 +1,79 @@
+# Mistral
+
+## Usage
+
+```ts
+import { MistralAI, serviceContextFromDefaults } from "llamaindex";
+
+const mistralLLM = new MistralAI({
+  model: "mistral-tiny",
+  apiKey: "<YOUR_API_KEY>",
+});
+
+const serviceContext = serviceContextFromDefaults({ llm: mistralLLM });
+```
+
+## Load and index documents
+
+For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
+
+```ts
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+```
+
+## Query
+
+```ts
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
+```
+
+## Full Example
+
+```ts
+import fs from "node:fs/promises";
+import {
+  Document,
+  MistralAI,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+async function main() {
+  // Create an instance of the LLM
+  const mistralLLM = new MistralAI({ model: "mistral-tiny" });
+
+  // Create a service context
+  const serviceContext = serviceContextFromDefaults({ llm: mistralLLM });
+
+  // Load the essay text to index (assumes an essay.txt file in the working directory)
+  const essay = await fs.readFile("./essay.txt", "utf-8");
+  const document = new Document({ text: essay, id_: "essay" });
+
+  // Load and index documents
+  const index = await VectorStoreIndex.fromDocuments([document], {
+    serviceContext,
+  });
+
+  // Create a query engine
+  const queryEngine = index.asQueryEngine();
+
+  const query = "What is the meaning of life?";
+
+  // Query
+  const response = await queryEngine.query({
+    query,
+  });
+
+  // Log the response
+  console.log(response.response);
+}
+
+main().catch(console.error);
+```
diff --git a/apps/docs/docs/modules/llms/available_llms/ollama.md b/apps/docs/docs/modules/llms/available_llms/ollama.md
new file mode 100644
index 0000000000000000000000000000000000000000..d92e2841fe9fd980545786f9ac93963791e1a043
--- /dev/null
+++ b/apps/docs/docs/modules/llms/available_llms/ollama.md
@@ -0,0 +1,76 @@
+# Ollama
+
+## Usage
+
+```ts
+import { Ollama, serviceContextFromDefaults } from "llamaindex";
+
+const ollamaLLM = new Ollama({ model: "llama2", temperature: 0.75 });
+
+const serviceContext = serviceContextFromDefaults({ llm: ollamaLLM });
+```
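+
+Ollama serves models from your local machine, so make sure the Ollama server is running and the model has been pulled before use:
+
+```bash
+ollama pull llama2
+```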
+
+## Load and index documents
+
+For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
+
+```ts
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+```
+
+## Query
+
+```ts
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
+```
+
+## Full Example
+
+```ts
+import fs from "node:fs/promises";
+import {
+  Document,
+  Ollama,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+async function main() {
+  // Create an instance of the LLM
+  const ollamaLLM = new Ollama({ model: "llama2", temperature: 0.75 });
+
+  // Create a service context
+  const serviceContext = serviceContextFromDefaults({ llm: ollamaLLM });
+
+  // Load the essay text to index (assumes an essay.txt file in the working directory)
+  const essay = await fs.readFile("./essay.txt", "utf-8");
+  const document = new Document({ text: essay, id_: "essay" });
+
+  // Load and index documents
+  const index = await VectorStoreIndex.fromDocuments([document], {
+    serviceContext,
+  });
+
+  // Create a query engine
+  const queryEngine = index.asQueryEngine();
+
+  const query = "What is the meaning of life?";
+
+  // Query
+  const response = await queryEngine.query({
+    query,
+  });
+
+  // Log the response
+  console.log(response.response);
+}
+
+main().catch(console.error);
+```
diff --git a/apps/docs/docs/modules/llms/available_llms/openai.md b/apps/docs/docs/modules/llms/available_llms/openai.md
new file mode 100644
index 0000000000000000000000000000000000000000..284e7d5a143021dc5e99d3e6ef039de1971bb2bb
--- /dev/null
+++ b/apps/docs/docs/modules/llms/available_llms/openai.md
@@ -0,0 +1,80 @@
+# OpenAI
+
+## Usage
+
+```ts
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+const openaiLLM = new OpenAI({
+  model: "gpt-3.5-turbo",
+  temperature: 0,
+  apiKey: "<YOUR_API_KEY>",
+});
+
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+```
+
+Alternatively, you can set the API key through an environment variable:
+
+```bash
+export OPENAI_API_KEY="<YOUR_API_KEY>"
+```
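+
+When `OPENAI_API_KEY` is set in the environment, the `apiKey` option can be omitted:
+
+```ts
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+```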
+
+## Load and index documents
+
+For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
+
+```ts
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+```
+
+## Query
+
+```ts
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
+```
+
+## Full Example
+
+```ts
+import fs from "node:fs/promises";
+import {
+  Document,
+  OpenAI,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+async function main() {
+  // Create an instance of the LLM
+  const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
+
+  // Create a service context
+  const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
+
+  // Load the essay text to index (assumes an essay.txt file in the working directory)
+  const essay = await fs.readFile("./essay.txt", "utf-8");
+  const document = new Document({ text: essay, id_: "essay" });
+
+  // Load and index documents
+  const index = await VectorStoreIndex.fromDocuments([document], {
+    serviceContext,
+  });
+
+  // Create a query engine
+  const queryEngine = index.asQueryEngine();
+
+  const query = "What is the meaning of life?";
+
+  // Query
+  const response = await queryEngine.query({
+    query,
+  });
+
+  // Log the response
+  console.log(response.response);
+}
+
+main().catch(console.error);
+```
diff --git a/apps/docs/docs/modules/llms/available_llms/portkey.md b/apps/docs/docs/modules/llms/available_llms/portkey.md
new file mode 100644
index 0000000000000000000000000000000000000000..7c7720f9f64a603e442fb1b8c6d50d2fe5939587
--- /dev/null
+++ b/apps/docs/docs/modules/llms/available_llms/portkey.md
@@ -0,0 +1,80 @@
+# Portkey LLM
+
+## Usage
+
+```ts
+import { Portkey, serviceContextFromDefaults } from "llamaindex";
+
+const portkeyLLM = new Portkey({
+  apiKey: "<YOUR_API_KEY>",
+});
+
+const serviceContext = serviceContextFromDefaults({ llm: portkeyLLM });
+```
+
+## Load and index documents
+
+For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
+
+```ts
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+```
+
+## Query
+
+```ts
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
+```
+
+## Full Example
+
+```ts
+import fs from "node:fs/promises";
+import {
+  Document,
+  Portkey,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+async function main() {
+  // Create an instance of the LLM
+  const portkeyLLM = new Portkey({
+    apiKey: "<YOUR_API_KEY>",
+  });
+
+  // Create a service context
+  const serviceContext = serviceContextFromDefaults({ llm: portkeyLLM });
+
+  // Load the essay text to index (assumes an essay.txt file in the working directory)
+  const essay = await fs.readFile("./essay.txt", "utf-8");
+  const document = new Document({ text: essay, id_: "essay" });
+
+  // Load and index documents
+  const index = await VectorStoreIndex.fromDocuments([document], {
+    serviceContext,
+  });
+
+  // Create a query engine
+  const queryEngine = index.asQueryEngine();
+
+  const query = "What is the meaning of life?";
+
+  // Query
+  const response = await queryEngine.query({
+    query,
+  });
+
+  // Log the response
+  console.log(response.response);
+}
+
+main().catch(console.error);
+```
diff --git a/apps/docs/docs/modules/llms/available_llms/together.md b/apps/docs/docs/modules/llms/available_llms/together.md
new file mode 100644
index 0000000000000000000000000000000000000000..620adf8a9dd6a28d2dea4f0cd173032e2797c0cc
--- /dev/null
+++ b/apps/docs/docs/modules/llms/available_llms/together.md
@@ -0,0 +1,80 @@
+# Together LLM
+
+## Usage
+
+```ts
+import { TogetherLLM, serviceContextFromDefaults } from "llamaindex";
+
+const togetherLLM = new TogetherLLM({
+  apiKey: "<YOUR_API_KEY>",
+});
+
+const serviceContext = serviceContextFromDefaults({ llm: togetherLLM });
+```
+
+## Load and index documents
+
+For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
+
+```ts
+const document = new Document({ text: essay, id_: "essay" });
+
+const index = await VectorStoreIndex.fromDocuments([document], {
+  serviceContext,
+});
+```
+
+## Query
+
+```ts
+const queryEngine = index.asQueryEngine();
+
+const query = "What is the meaning of life?";
+
+const results = await queryEngine.query({
+  query,
+});
+```
+
+## Full Example
+
+```ts
+import fs from "node:fs/promises";
+import {
+  Document,
+  TogetherLLM,
+  VectorStoreIndex,
+  serviceContextFromDefaults,
+} from "llamaindex";
+
+async function main() {
+  // Create an instance of the LLM
+  const togetherLLM = new TogetherLLM({
+    apiKey: "<YOUR_API_KEY>",
+  });
+
+  // Create a service context
+  const serviceContext = serviceContextFromDefaults({ llm: togetherLLM });
+
+  // Load the essay text to index (assumes an essay.txt file in the working directory)
+  const essay = await fs.readFile("./essay.txt", "utf-8");
+  const document = new Document({ text: essay, id_: "essay" });
+
+  // Load and index documents
+  const index = await VectorStoreIndex.fromDocuments([document], {
+    serviceContext,
+  });
+
+  // Create a query engine
+  const queryEngine = index.asQueryEngine();
+
+  const query = "What is the meaning of life?";
+
+  // Query
+  const response = await queryEngine.query({
+    query,
+  });
+
+  // Log the response
+  console.log(response.response);
+}
+
+main().catch(console.error);
+```
diff --git a/apps/docs/docs/modules/llm.md b/apps/docs/docs/modules/llms/index.md
similarity index 96%
rename from apps/docs/docs/modules/llm.md
rename to apps/docs/docs/modules/llms/index.md
index d7e0234e01cd8b8e7f6edb414258eda37b5d487c..6db0a800ca91661d78b32dc78bb1b19e059750f6 100644
--- a/apps/docs/docs/modules/llm.md
+++ b/apps/docs/docs/modules/llms/index.md
@@ -2,7 +2,7 @@
 sidebar_position: 3
 ---
 
-# LLM
+# Large Language Models (LLMs)
 
 The LLM is responsible for reading text and generating natural language responses to queries. By default, LlamaIndex.TS uses `gpt-3.5-turbo`.
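+
+For example, you can swap in a different model by passing an LLM to the service context (a minimal sketch, mirroring the provider pages below):
+
+```ts
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
+
+// Use GPT-4 with deterministic output instead of the default model
+const serviceContext = serviceContextFromDefaults({
+  llm: new OpenAI({ model: "gpt-4", temperature: 0 }),
+});
+```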
 
diff --git a/examples/anthropic.ts b/examples/anthropic.ts
index 0f5f9e889375c97de8e07d691190bc7d8142e041..8b1afc107ef52c2fd90b1097c35a8ec102e6a898 100644
--- a/examples/anthropic.ts
+++ b/examples/anthropic.ts
@@ -1,7 +1,9 @@
 import { Anthropic } from "llamaindex";
 
 (async () => {
-  const anthropic = new Anthropic();
+  const anthropic = new Anthropic({
+    apiKey: process.env.ANTHROPIC_API_KEY,
+  });
   const result = await anthropic.chat({
     messages: [
       { content: "You want to talk in rhymes.", role: "system" },