From 73c188761f3a32b4817e2589b6702ead1e46ae44 Mon Sep 17 00:00:00 2001
From: Logan Markewich <logan.markewich@live.com>
Date: Thu, 20 Jul 2023 19:08:38 -0600
Subject: [PATCH] fix some docs usage examples

---
 apps/docs/docs/modules/high_level/query_engine.md | 2 +-
 apps/docs/docs/modules/low_level/embedding.md     | 4 ++--
 apps/docs/docs/modules/low_level/llm.md           | 8 ++++----
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/apps/docs/docs/modules/high_level/query_engine.md b/apps/docs/docs/modules/high_level/query_engine.md
index 20dbe35b2..c6d6452a1 100644
--- a/apps/docs/docs/modules/high_level/query_engine.md
+++ b/apps/docs/docs/modules/high_level/query_engine.md
@@ -8,7 +8,7 @@ A query engine wraps a `Retriever` and a `ResponseSynthesizer` into a pipeline,
 
 ```typescript
 const queryEngine = index.asQueryEngine();
-const response = queryEngine.query("query string");
+const response = await queryEngine.query("query string");
 ```
 
 ## Sub Question Query Engine
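
Context for the hunk above: `queryEngine.query()` returns a Promise, so the result must be awaited. A minimal end-to-end sketch of the corrected usage follows; the document text and query string are made up, and `Document` / `VectorStoreIndex.fromDocuments` are the usual llamaindex entry points, assumed here rather than taken from the patched page:

```typescript
import { Document, VectorStoreIndex } from "llamaindex";

// Build a small index from an in-memory document.
const document = new Document({ text: "The sky is blue." });
const index = await VectorStoreIndex.fromDocuments([document]);

// query() is async, hence the `await` added in the patch above.
const queryEngine = index.asQueryEngine();
const response = await queryEngine.query("What color is the sky?");
console.log(response.toString());
```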
diff --git a/apps/docs/docs/modules/low_level/embedding.md b/apps/docs/docs/modules/low_level/embedding.md
index 645f33459..57f672abe 100644
--- a/apps/docs/docs/modules/low_level/embedding.md
+++ b/apps/docs/docs/modules/low_level/embedding.md
@@ -9,11 +9,11 @@ The embedding model in LlamaIndex is responsible for creating numerical represen
 This can be explicitly set in the `ServiceContext` object.
 
 ```typescript
-import { OpenAIEmbedding, ServiceContext } from "llamaindex";
+import { OpenAIEmbedding, serviceContextFromDefaults } from "llamaindex";
 
 const openaiEmbeds = new OpenAIEmbedding();
 
-const serviceContext = new ServiceContext({ embedModel: openaiEmbeds });
+const serviceContext = serviceContextFromDefaults({ embedModel: openaiEmbeds });
 ```
 
 ## API Reference
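
For reference, a minimal sketch of how the corrected `serviceContextFromDefaults` call is consumed downstream. Passing the service context to `VectorStoreIndex.fromDocuments` is an assumption based on the library's usual API of this era, not something shown on the patched page:

```typescript
import {
  Document,
  OpenAIEmbedding,
  VectorStoreIndex,
  serviceContextFromDefaults,
} from "llamaindex";

// serviceContextFromDefaults fills in every field not overridden,
// so only the embedding model needs to be supplied here.
const serviceContext = serviceContextFromDefaults({
  embedModel: new OpenAIEmbedding(),
});

// The custom embedding model takes effect once the service context
// is passed at index construction time (assumed options shape).
const index = await VectorStoreIndex.fromDocuments(
  [new Document({ text: "..." })],
  { serviceContext },
);
```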
diff --git a/apps/docs/docs/modules/low_level/llm.md b/apps/docs/docs/modules/low_level/llm.md
index 29e8a8879..4f0ba2db5 100644
--- a/apps/docs/docs/modules/low_level/llm.md
+++ b/apps/docs/docs/modules/low_level/llm.md
@@ -9,14 +9,14 @@ The LLM is responsible for reading text and generating natural language response
 The LLM can be explicitly set in the `ServiceContext` object.
 
 ```typescript
-import { ChatGPTLLMPredictor, ServiceContext } from "llamaindex";
+import { OpenAI, serviceContextFromDefaults } from "llamaindex";
 
-const openaiLLM = new ChatGPTLLMPredictor({ model: "gpt-3.5-turbo" });
+const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });
 
-const serviceContext = new ServiceContext({ llmPredictor: openaiLLM });
+const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
 ```
 
 ## API Reference
 
-- [ChatGPTLLMPredictor](../../api/classes/ChatGPTLLMPredictor.md)
+- [OpenAI](../../api/classes/OpenAI.md)
 - [ServiceContext](../../api/interfaces/ServiceContext.md)
\ No newline at end of file
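
To round out the LLM change: a minimal sketch of using the `OpenAI` class both directly and through a service context. The `complete()` call and its chat-style return shape are assumptions about the library's API at this point in time, not shown on the patched page:

```typescript
import { OpenAI, serviceContextFromDefaults } from "llamaindex";

// OpenAI replaces the removed ChatGPTLLMPredictor; temperature 0
// keeps the doc example deterministic.
const openaiLLM = new OpenAI({ model: "gpt-3.5-turbo", temperature: 0 });

// The LLM can be called directly, outside any query pipeline
// (assumed complete() signature returning a chat-style response).
const completion = await openaiLLM.complete("Say hello in one word.");
console.log(completion.message.content);

// Or wired into a service context so indexes and engines use it.
const serviceContext = serviceContextFromDefaults({ llm: openaiLLM });
```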
-- 
GitLab