From 2b39ceffa6c0533cde15f34b28126a47cf718b32 Mon Sep 17 00:00:00 2001
From: ANKIT VARSHNEY <132201033+AVtheking@users.noreply.github.com>
Date: Wed, 19 Mar 2025 12:45:11 +0530
Subject: [PATCH] docs: doc for structured output (#1761)

---
 .changeset/nervous-cars-own.md                |  5 ++++
 .../docs/llamaindex/modules/llms/ollama.mdx   | 29 +++++++++++++++++++
 .../docs/llamaindex/modules/llms/openai.mdx   | 27 +++++++++++++++++
 3 files changed, 61 insertions(+)
 create mode 100644 .changeset/nervous-cars-own.md

diff --git a/.changeset/nervous-cars-own.md b/.changeset/nervous-cars-own.md
new file mode 100644
index 000000000..f4e117f48
--- /dev/null
+++ b/.changeset/nervous-cars-own.md
@@ -0,0 +1,5 @@
+---
+"@llamaindex/doc": patch
+---
+
+Added documentation for structured output in OpenAI and Ollama
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/ollama.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/ollama.mdx
index ed3161f24..4767d285b 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/ollama.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/ollama.mdx
@@ -55,6 +55,35 @@ const results = await queryEngine.query({
 });
 ```
 
+## Using JSON Response Format
+
+You can configure Ollama to return responses in JSON format:
+
+```ts
+import { Ollama } from "@llamaindex/ollama";
+import { z } from "zod";
+
+// Simple JSON format
+const llm = new Ollama({ 
+  model: "llama2", 
+  temperature: 0,
+  responseFormat: { type: "json_object" }
+});
+
+// Using Zod schema for validation
+const responseSchema = z.object({
+  summary: z.string(),
+  topics: z.array(z.string()),
+  sentiment: z.enum(["positive", "negative", "neutral"])
+});
+
+const structuredLlm = new Ollama({
+  model: "llama2",
+  temperature: 0,
+  responseFormat: responseSchema
+});
+```
+
 ## Full Example
 
 ```ts
diff --git a/apps/next/src/content/docs/llamaindex/modules/llms/openai.mdx b/apps/next/src/content/docs/llamaindex/modules/llms/openai.mdx
index a861a3501..fb1442333 100644
--- a/apps/next/src/content/docs/llamaindex/modules/llms/openai.mdx
+++ b/apps/next/src/content/docs/llamaindex/modules/llms/openai.mdx
@@ -46,6 +46,33 @@ or
 Settings.llm = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0, apiKey: <YOUR_API_KEY>, baseURL: "https://api.scaleway.ai/v1" });
 ```
 
+## Using JSON Response Format
+
+You can configure OpenAI to return responses in JSON format:
+
+```ts
+Settings.llm = new OpenAI({ 
+  model: "gpt-4o", 
+  temperature: 0,
+  responseFormat: { type: "json_object" }  
+});
+
+// You can also use a Zod schema to validate the response structure
+import { z } from "zod";
+
+const responseSchema = z.object({
+  summary: z.string(),  
+  topics: z.array(z.string()),
+  sentiment: z.enum(["positive", "negative", "neutral"])
+});
+
+Settings.llm = new OpenAI({ 
+  model: "gpt-4o", 
+  temperature: 0,
+  responseFormat: responseSchema  
+});
+```
+
 ## Load and index documents
 
 For this example, we will use a single document. In a real-world scenario, you would have multiple documents to index.
-- 
GitLab