From 2619d4175da4425fa45a70ccf2081ca8045f4103 Mon Sep 17 00:00:00 2001
From: Yi Ding <yi.s.ding@gmail.com>
Date: Tue, 18 Jul 2023 23:10:14 -0700
Subject: [PATCH] Fixed the context window issue.

Putting a default max_tokens value into the request was a mistake; it unnecessarily constrained the model's context window. It is now omitted unless explicitly set.
---
 apps/simple/vectorIndexCustomize.ts | 18 ++++++++++++++++++
 packages/core/src/LLM.ts            |  3 +--
 2 files changed, 19 insertions(+), 2 deletions(-)
 create mode 100644 apps/simple/vectorIndexCustomize.ts

diff --git a/apps/simple/vectorIndexCustomize.ts b/apps/simple/vectorIndexCustomize.ts
new file mode 100644
index 000000000..49d5ca4d8
--- /dev/null
+++ b/apps/simple/vectorIndexCustomize.ts
@@ -0,0 +1,18 @@
+import { Document, VectorStoreIndex, RetrieverQueryEngine } from "llamaindex";
+import essay from "./essay";
+
+// Customize retrieval and query args
+async function main() {
+  const document = new Document({ text: essay });
+  const index = await VectorStoreIndex.fromDocuments([document]);
+  const retriever = index.asRetriever();
+  retriever.similarityTopK = 5;
+  // TODO: cannot pass responseSynthesizer into retriever query engine
+  const queryEngine = new RetrieverQueryEngine(retriever);
+  const response = await queryEngine.query(
+    "What did the author do growing up?"
+  );
+  console.log(response.response);
+}
+
+main().catch(console.error);
diff --git a/packages/core/src/LLM.ts b/packages/core/src/LLM.ts
index 20b706610..0913d64fc 100644
--- a/packages/core/src/LLM.ts
+++ b/packages/core/src/LLM.ts
@@ -78,8 +78,7 @@ export class OpenAI implements LLM {
     this.temperature = init?.temperature ?? 0;
     this.requestTimeout = init?.requestTimeout ?? null;
     this.maxRetries = init?.maxRetries ?? 10;
-    this.maxTokens =
-      init?.maxTokens ?? Math.floor(ALL_AVAILABLE_MODELS[this.model] / 2);
+    this.maxTokens = init?.maxTokens ?? undefined;
     this.openAIKey = init?.openAIKey ?? null;
     this.session = init?.session ?? getOpenAISession();
     this.callbackManager = init?.callbackManager;
-- 
GitLab