diff --git a/.changeset/weak-students-pay.md b/.changeset/weak-students-pay.md
new file mode 100644
index 0000000000000000000000000000000000000000..e40a3f712002641d5130dde7e18e70314634fccb
--- /dev/null
+++ b/.changeset/weak-students-pay.md
@@ -0,0 +1,5 @@
+---
+"create-llama": patch
+---
+
+Use the retrieval defaults from LlamaCloud instead of hardcoding TOP_K=3 when the TOP_K environment variable is not set
diff --git a/helpers/env-variables.ts b/helpers/env-variables.ts
index 222b36c6d333a5a75cecc28986ebdeee4ae10476..77b60ac46aee36e8e0fdb120500c8f376005641b 100644
--- a/helpers/env-variables.ts
+++ b/helpers/env-variables.ts
@@ -396,7 +396,6 @@ const getEngineEnvs = (): EnvVar[] => {
       name: "TOP_K",
       description:
         "The number of similar embeddings to return when retrieving documents.",
-      value: "3",
     },
     {
       name: "STREAM_TIMEOUT",
diff --git a/templates/components/engines/python/agent/engine.py b/templates/components/engines/python/agent/engine.py
index 3efc1314fad86880d1dd40d1543337513fd5598e..854757e2b558e8d260faaff87cfdd9e764d25656 100644
--- a/templates/components/engines/python/agent/engine.py
+++ b/templates/components/engines/python/agent/engine.py
@@ -9,14 +9,14 @@ from llama_index.core.tools.query_engine import QueryEngineTool
 
 def get_chat_engine(filters=None, params=None):
     system_prompt = os.getenv("SYSTEM_PROMPT")
-    top_k = os.getenv("TOP_K", "3")
+    top_k = int(os.getenv("TOP_K", 0))
     tools = []
 
     # Add query tool if index exists
     index = get_index()
     if index is not None:
         query_engine = index.as_query_engine(
-            similarity_top_k=int(top_k), filters=filters
+            filters=filters, **({"similarity_top_k": top_k} if top_k != 0 else {})
         )
         query_engine_tool = QueryEngineTool.from_defaults(query_engine=query_engine)
         tools.append(query_engine_tool)
diff --git a/templates/components/engines/python/chat/engine.py b/templates/components/engines/python/chat/engine.py
index b1fd361c2411a79c3d0c3481cd6e7540a097dacd..61fc7aad8e21a493b8c4db40dd67ec40f9551f91 100644
--- a/templates/components/engines/python/chat/engine.py
+++ b/templates/components/engines/python/chat/engine.py
@@ -9,7 +9,7 @@ from llama_index.core.chat_engine import CondensePlusContextChatEngine
 def get_chat_engine(filters=None, params=None):
     system_prompt = os.getenv("SYSTEM_PROMPT")
     citation_prompt = os.getenv("SYSTEM_CITATION_PROMPT", None)
-    top_k = int(os.getenv("TOP_K", 3))
+    top_k = int(os.getenv("TOP_K", 0))
 
     node_postprocessors = []
     if citation_prompt:
@@ -26,8 +26,7 @@ def get_chat_engine(filters=None, params=None):
         )
 
     retriever = index.as_retriever(
-        similarity_top_k=top_k,
-        filters=filters,
+        filters=filters, **({"similarity_top_k": top_k} if top_k != 0 else {})
     )
 
     return CondensePlusContextChatEngine.from_defaults(
diff --git a/templates/components/engines/typescript/chat/chat.ts b/templates/components/engines/typescript/chat/chat.ts
index e60c797050a6368e86ac50db09672888492c7478..c0841aa5bdd1cc609303322cdfdf633dfea3df7b 100644
--- a/templates/components/engines/typescript/chat/chat.ts
+++ b/templates/components/engines/typescript/chat/chat.ts
@@ -10,7 +10,7 @@ export async function createChatEngine(documentIds?: string[], params?: any) {
     );
   }
   const retriever = index.asRetriever({
-    similarityTopK: process.env.TOP_K ? parseInt(process.env.TOP_K) : 3,
+    similarityTopK: process.env.TOP_K ? parseInt(process.env.TOP_K) : undefined,
     filters: generateFilters(documentIds || []),
   });
 
diff --git a/templates/types/extractor/fastapi/app/engine/engine.py b/templates/types/extractor/fastapi/app/engine/engine.py
index fbd92f67be2fcfed303549258db5c6812cc6ce75..07e5f5edabc1dc208d5fd72ff0a62a9f8f8d584d 100644
--- a/templates/types/extractor/fastapi/app/engine/engine.py
+++ b/templates/types/extractor/fastapi/app/engine/engine.py
@@ -7,7 +7,7 @@ from app.engine.index import get_index
 
 
 def get_query_engine(output_cls):
-    top_k = os.getenv("TOP_K", 3)
+    top_k = int(os.getenv("TOP_K", 0))
 
     index = get_index()
     if index is None:
@@ -21,7 +21,7 @@ def get_query_engine(output_cls):
     sllm = Settings.llm.as_structured_llm(output_cls)
 
     return index.as_query_engine(
-        similarity_top_k=int(top_k),
         llm=sllm,
         response_mode="tree_summarize",
+        **({"similarity_top_k": top_k} if top_k != 0 else {}),
     )
diff --git a/templates/types/multiagent/fastapi/app/agents/query_engine/agent.py b/templates/types/multiagent/fastapi/app/agents/query_engine/agent.py
index bee1f017e901e5b8290af08272165810d619be5f..4ed24e5e572ce63f524838b7e510638eec3ec136 100644
--- a/templates/types/multiagent/fastapi/app/agents/query_engine/agent.py
+++ b/templates/types/multiagent/fastapi/app/agents/query_engine/agent.py
@@ -19,7 +19,10 @@ def get_query_engine_tool() -> QueryEngineTool:
     index = get_index()
     if index is None:
         raise ValueError("Index not found. Please create an index first.")
-    query_engine = index.as_query_engine(similarity_top_k=int(os.getenv("TOP_K", 3)))
+    top_k = int(os.getenv("TOP_K", 0))
+    query_engine = index.as_query_engine(
+        **({"similarity_top_k": top_k} if top_k != 0 else {})
+    )
     return QueryEngineTool(
         query_engine=query_engine,
         metadata=ToolMetadata(