diff --git a/apps/simple/vectorIndexCustomize.ts b/apps/simple/vectorIndexCustomize.ts
new file mode 100644
index 0000000000000000000000000000000000000000..49d5ca4d80be166291502b58f65d2090410ab775
--- /dev/null
+++ b/apps/simple/vectorIndexCustomize.ts
@@ -0,0 +1,19 @@
+import { Document, VectorStoreIndex, RetrieverQueryEngine } from "llamaindex";
+import essay from "./essay";
+
+// Customize retrieval args (similarityTopK) before building the query engine
+async function main() {
+  const document = new Document({ text: essay });
+  const index = await VectorStoreIndex.fromDocuments([document]);
+  const retriever = index.asRetriever();
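+  // Return the 5 most similar nodes for each query instead of the default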
+  retriever.similarityTopK = 5;
+  // TODO: RetrieverQueryEngine does not yet accept a custom responseSynthesizer
+  const queryEngine = new RetrieverQueryEngine(retriever);
+  const response = await queryEngine.query(
+    "What did the author do growing up?"
+  );
+  console.log(response.response);
+}
+
+main().catch(console.error);
diff --git a/packages/core/src/LLM.ts b/packages/core/src/LLM.ts
index 20b706610fa2677e0f377e84baa9acae35766cc0..0913d64fc0bb894c1e32aae92f81488ad2b3d361 100644
--- a/packages/core/src/LLM.ts
+++ b/packages/core/src/LLM.ts
@@ -78,8 +78,7 @@ export class OpenAI implements LLM {
     this.temperature = init?.temperature ?? 0;
     this.requestTimeout = init?.requestTimeout ?? null;
     this.maxRetries = init?.maxRetries ?? 10;
-    this.maxTokens =
-      init?.maxTokens ?? Math.floor(ALL_AVAILABLE_MODELS[this.model] / 2);
+    this.maxTokens = init?.maxTokens ?? undefined; // if unset, no explicit cap is sent to the API
     this.openAIKey = init?.openAIKey ?? null;
     this.session = init?.session ?? getOpenAISession();
     this.callbackManager = init?.callbackManager;